diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py
deleted file mode 100644
index 2d85d29e474..00000000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/config_helper.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-
-def get_mpack_name(config):
-  return config['hostLevelParams']['stack_name']
-
-
-def get_mpack_version(config):
-  return config['hostLevelParams']['stack_version']
-
-
-def get_mpack_instance_name(config):
-  return config['serviceGroupName']
-
-
-def get_module_name(config):
-  return config['serviceName']
-
-
-def get_component_type(config):
-  return config['role']
-
-
-def get_component_instance_name(config):
-  return "default"
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
index 10765bb32f4..4af636bc7d5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
@@ -25,7 +25,6 @@ from resource_management.libraries.script import Script
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value, get_cluster_setting
 
 
 def get_not_managed_resources():
   """
@@ -35,8 +34,8 @@ def get_not_managed_resources():
   """
   config = Script.get_config()
   not_managed_hdfs_path_list = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
-  if get_cluster_setting('managed_hdfs_resource_property_names') is not None:
-    managed_hdfs_resource_property_names = get_cluster_setting_value('managed_hdfs_resource_property_names')
+  if 'managed_hdfs_resource_property_names' in config['configurations']['cluster-env']:
+    managed_hdfs_resource_property_names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
   managed_hdfs_resource_property_list = filter(None, [property.strip() for property in managed_hdfs_resource_property_names.split(',')])
 
   for property_name in managed_hdfs_resource_property_list:
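Reviewer note: the replacement lookup above reads the managed-property list straight out of cluster-env instead of going through the removed cluster_settings helpers. A minimal standalone sketch of that parsing step, assuming only the command-JSON shape visible in the hunk (names and values below are illustrative):

    # Sketch of the cluster-env lookup restored above; `config` mirrors the
    # command JSON shape assumed by the hunk.
    def managed_property_names(config):
      cluster_env = config['configurations']['cluster-env']
      if 'managed_hdfs_resource_property_names' not in cluster_env:
        return []
      raw = cluster_env['managed_hdfs_resource_property_names']
      # Split the comma-separated property list and drop empty entries.
      return [name.strip() for name in raw.split(',') if name.strip()]

    print(managed_property_names(
      {'configurations': {'cluster-env': {'managed_hdfs_resource_property_names': 'a.dir, b.dir,'}}}))
    # -> ['a.dir', 'b.dir']
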
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/mpack_manager_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/mpack_manager_helper.py
deleted file mode 100644
index 652a9e959fa..00000000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/mpack_manager_helper.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import os
-from instance_manager import create_mpack, set_mpack_instance, get_conf_dir, list_instances
-
-CONFIG_DIR_KEY_NAME = 'config_dir'
-PATH_KEY_NAME = 'path'
-COMPONENTS_PLURAL_KEY_NAME = 'components'
-COMPONENT_INSTANCES_PLURAL_KEY_NAME = 'component-instances'
-
-
-def get_component_conf_path(mpack_name, instance_name, module_name, components_instance_type,
-                            subgroup_name='default', component_instance_name='default'):
-  """
-  :returns the single string that contains the path to the configuration folder of given component instance
-  :raises ValueError if the parameters doesn't match the mpack or instances structure
-  """
-
-  conf_json = get_conf_dir(mpack_name, instance_name, subgroup_name, module_name,
-                           {components_instance_type: [component_instance_name]})
-
-  return conf_json[COMPONENTS_PLURAL_KEY_NAME][components_instance_type.lower()][COMPONENT_INSTANCES_PLURAL_KEY_NAME][
-    component_instance_name][CONFIG_DIR_KEY_NAME]
-
-
-def get_component_target_path(mpack_name, instance_name, module_name, components_instance_type,
-                              subgroup_name='default', component_instance_name='default'):
-  """
-  :returns the single string that contains the path to the mpack component folder of given component instance
-  :raises ValueError if the parameters doesn't match the mpack or instances structure
-  """
-
-  instances_json = list_instances(mpack_name, instance_name, subgroup_name, module_name,
-                                  {components_instance_type: [component_instance_name]})
-
-  return instances_json[COMPONENTS_PLURAL_KEY_NAME][components_instance_type.lower()][
-    COMPONENT_INSTANCES_PLURAL_KEY_NAME][component_instance_name][PATH_KEY_NAME]
-
-
-def get_component_home_path(mpack_name, instance_name, module_name, components_instance_type,
-                            subgroup_name='default', component_instance_name='default'):
-  """
-  :returns the single string that contains the path to the module component folder of given component instance
-  :raises ValueError if the parameters doesn't match the mpack or instances structure
-  """
-
-  component_path = get_component_target_path(mpack_name=mpack_name, instance_name=instance_name,
-                                             subgroup_name=subgroup_name,
-                                             module_name=module_name, components_instance_type=components_instance_type,
-                                             component_instance_name=component_instance_name)
-
-  return os.readlink(component_path)
-
-
-def create_component_instance(mpack_name, mpack_version, instance_name, module_name, components_instance_type,
-                              subgroup_name='default', component_instance_name='default'):
-  """
-  creates the single component instance according to the parameters
-  :raises ValueError if the parameters doesn't match the mpack or instances structure
-  """
-  create_mpack(mpack_name, mpack_version, instance_name, subgroup_name, module_name,
-               None, {components_instance_type: [component_instance_name]})
-
-
-def set_component_instance_version(mpack_name, mpack_version, instance_name, module_name, components_instance_type,
-                                   subgroup_name='default', component_instance_name='default'):
-  """
-  changes the version of the single component instance according to the parameters
-  :raises ValueError if the parameters doesn't match the mpack or instances structure
-  """
-  set_mpack_instance(mpack_name, mpack_version, instance_name, subgroup_name, module_name,
-                     None, {components_instance_type: [component_instance_name]})
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
index 47720b8fcda..8a7e57779c2 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
@@ -51,13 +51,13 @@ def create_repo_files(self):
     Creates repositories in a consistent manner for all types
     :return: a dictionary with repo ID => repo file name mapping
     """
-    if self.command_repository.mpack_id is None:
+    if self.command_repository.version_id is None:
      raise Fail("The command repository was not parsed correctly")
 
    if 0 == len(self.command_repository.items):
      Logger.warning(
        "Repository for {0}/{1} has no repositories.  Ambari may not be managing this version.".format(
-          self.command_repository.mpack_name, self.command_repository.version_string))
+          self.command_repository.stack_name, self.command_repository.version_string))
      return {}
 
    append_to_file = False  # initialize to False to create the file anew.
@@ -73,7 +73,7 @@ def create_repo_files(self):
      if not repository.ambari_managed:
        Logger.warning(
          "Repository for {0}/{1}/{2} is not managed by Ambari".format(
-            self.command_repository.mpack_name, self.command_repository.version_string, repository.repo_id))
+            self.command_repository.stack_name, self.command_repository.version_string, repository.repo_id))
      else:
        Repository(repository.repo_id,
                   action="create",
@@ -133,9 +133,10 @@ def __init__(self, repo_object):
    else:
      raise Fail("Cannot deserialize command repository {0}".format(str(repo_object)))
 
-    self.mpack_id = _find_value(json_dict, 'mpackId')
-    self.mpack_name = _find_value(json_dict, 'mpackName')
-    self.version_string = _find_value(json_dict, 'mpackVersion')
+    # version_id is the primary id of the repo_version table in the database
+    self.version_id = _find_value(json_dict, 'repoVersionId')
+    self.stack_name = _find_value(json_dict, 'stackName')
+    self.version_string = _find_value(json_dict, 'repoVersion')
    self.repo_filename = _find_value(json_dict, 'repoFileName')
    self.feat = CommandRepositoryFeature(_find_value(json_dict, "feature", default={}))
    self.items = []
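Reviewer note: the fields parsed here form the JSON contract that CommandRepository.java (further down in this patch) serializes for the agent. A hedged sketch of a payload that would satisfy this constructor — only the key names are taken from the hunks, every value is invented:

    # Illustrative command-repository payload; the key names
    # (repoVersionId / stackName / repoVersion / repoFileName / feature / repositories)
    # come from the diff, the values and feature/repository sub-keys are made up.
    command_repository = {
      'repoVersionId': 51,              # primary id of the repo_version table row
      'stackName': 'HDP',
      'repoVersion': '2.6.3.0-235',
      'repoFileName': 'ambari-hdp-51',
      'resolved': True,
      'feature': {'preInstalled': False, 'scoped': True},
      'repositories': [
        {'repoId': 'HDP-2.6', 'baseUrl': 'http://repo.example.org/hdp', 'ambariManaged': True},
      ],
    }

    def _find_value(json_dict, key, default=None):
      # mirrors the fallback behavior of the helper used by __init__ above
      return json_dict.get(key, default)

    print(_find_value(command_repository, 'repoVersionId'))  # -> 51
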
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 64f465b509e..7813efc582c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -61,8 +61,6 @@ from resource_management.libraries.functions.show_logs import show_logs
 from resource_management.core.providers import get_provider
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.functions.config_helper import get_mpack_name, get_mpack_version, \
-  get_mpack_instance_name, get_module_name, get_component_type, get_component_instance_name
 
 import ambari_simplejson as json  # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
@@ -793,21 +791,6 @@ def load_available_packages(self):
       Logger.exception("Unable to load available packages")
       self.available_packages_in_repos = []
 
-  def create_component_instance(self):
-    # should be used only when mpack-instance-manager is available
-    from resource_management.libraries.functions.mpack_manager_helper import create_component_instance
-    config = self.get_config()
-    mpack_name = get_mpack_name(config)
-    mpack_version = get_mpack_version(config)
-    mpack_instance_name = get_mpack_instance_name(config)
-    module_name = get_module_name(config)
-    component_type = get_component_type(config)
-    component_instance_name = get_component_instance_name(config)
-
-    create_component_instance(mpack_name=mpack_name, mpack_version=mpack_version, instance_name=mpack_instance_name,
-                              module_name=module_name, components_instance_type=component_type,
-                              component_instance_name=component_instance_name)
-
   def install_packages(self, env):
     """
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index eb70fc889be..7bb01b5360c 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -485,13 +485,6 @@
         swagger-maven-plugin
         ${swagger.maven.plugin.version}
-
-        org.inferred
-        freebuilder
-        1.14.8
-        true
-        provided
-
@@ -515,10 +508,7 @@
           UTF-8
           true
           true
-
-          src/main/java
-          src/test/java
-
+          true
           false
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 31d97ed050a..7ceae4872c3 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1644,10 +1644,6 @@
       0.1.10
       test
-
-      org.inferred
-      freebuilder
-
diff --git a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
index 9b88a9d7800..94be1774267 100644
--- a/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
+++ b/ambari-server/src/main/java/org/apache/ambari/annotations/ExperimentalFeature.java
@@ -51,11 +51,5 @@ public enum ExperimentalFeature {
    * For code that is for upgrading Mpacks. Use this to mark code that may ultimately
    * be removed.
    */
-  MPACK_UPGRADES,
-
-  /**
-   * Used to mark code that is required for successful removal and refactoring
-   * when repo versions are dropped.
-   */
-  REPO_VERSION_REMOVAL;
+  MPACK_UPGRADES
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java b/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java
index af1cd694a50..c870b652d58 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java
@@ -31,13 +31,4 @@ public ServiceComponentHostNotFoundException(String clusterName,
         + ", hostName=" + hostName);
   }
 
-  public ServiceComponentHostNotFoundException(String clusterName,
-      String serviceName, Long serviceComponentId, String hostName) {
-    super("ServiceComponentHost not found"
-        + ", clusterName=" + clusterName
-        + ", serviceName=" + serviceName
-        + ", serviceComponentId=" + serviceComponentId
-        + ", hostName=" + hostName);
-  }
-
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/StateRecoveryManager.java b/ambari-server/src/main/java/org/apache/ambari/server/StateRecoveryManager.java
index 8d32dd6d98b..94a6e085dbd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/StateRecoveryManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/StateRecoveryManager.java
@@ -20,10 +20,10 @@
 
 import java.util.List;
 
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.dao.MpackHostStateDAO;
-import org.apache.ambari.server.orm.entities.MpackHostStateEntity;
-import org.apache.ambari.server.state.Mpack;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,41 +39,48 @@ public class StateRecoveryManager {
   private static final Logger LOG = LoggerFactory.getLogger(StateRecoveryManager.class);
 
   @Inject
-  private MpackHostStateDAO mpackHostStateDAO;
+  private HostVersionDAO hostVersionDAO;
 
-  /**
-   * Used for looking up {@link Mpack} instances by IDs.
-   */
   @Inject
-  private AmbariMetaInfo ambariMetaInfo;
+  private ServiceComponentDesiredStateDAO serviceComponentDAO;
 
   public void doWork() {
-    updateManagementPackInstallationState();
+    checkHostAndClusterVersions();
   }
 
-  /**
-   * Resets any management pack installation states from
-   * {@link RepositoryVersionState#INSTALLING} to
-   * {@link RepositoryVersionState#INSTALL_FAILED}.
-   */
-  void updateManagementPackInstallationState() {
-    List<MpackHostStateEntity> mpackHostStates = mpackHostStateDAO.findAll();
-    for (MpackHostStateEntity mpackHostState : mpackHostStates) {
-      if (mpackHostState.getState() == RepositoryVersionState.INSTALLING) {
-        mpackHostState.setState(RepositoryVersionState.INSTALL_FAILED);
-
-        Mpack mpack = ambariMetaInfo.getMpack(mpackHostState.getMpackId());
-
+  void checkHostAndClusterVersions() {
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
+    for (HostVersionEntity hostVersion : hostVersions) {
+      if (hostVersion.getState().equals(RepositoryVersionState.INSTALLING)) {
+        hostVersion.setState(RepositoryVersionState.INSTALL_FAILED);
         String msg = String.format(
-          "The installation state of management pack %s on host %s was set from %s to %s",
-          mpack.getName(),
-          mpackHostState.getHostName(),
+          "Recovered state of host version %s on host %s from %s to %s",
+          hostVersion.getRepositoryVersion().getDisplayName(),
+          hostVersion.getHostName(),
           RepositoryVersionState.INSTALLING,
           RepositoryVersionState.INSTALL_FAILED);
 
         LOG.warn(msg);
+        hostVersionDAO.merge(hostVersion);
+      }
+    }
 
-        mpackHostStateDAO.merge(mpackHostState);
+    List<ServiceComponentDesiredStateEntity> components = serviceComponentDAO.findAll();
+    for (ServiceComponentDesiredStateEntity component : components) {
+      if (RepositoryVersionState.INSTALLING == component.getRepositoryState()) {
+        component.setRepositoryState(RepositoryVersionState.INSTALL_FAILED);
+        serviceComponentDAO.merge(component);
+        String msg = String.format(
+          "Recovered state of cluster %s of component %s/%s for version %s from %s to %s",
+          component.getClusterId(),
+          component.getServiceId(),
+          component.getComponentName(),
+          component.getDesiredRepositoryVersion().getDisplayName(),
+          RepositoryVersionState.INSTALLING,
+          RepositoryVersionState.INSTALL_FAILED);
+        LOG.warn(msg);
       }
     }
   }
+
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index 5370bb08b55..57f9c4128e8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -34,22 +34,18 @@
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
-import org.apache.ambari.server.orm.dao.MpackDAO;
-import org.apache.ambari.server.orm.dao.ServiceGroupDAO;
-import org.apache.ambari.server.orm.entities.MpackEntity;
 import org.apache.ambari.server.orm.entities.RepoOsEntity;
-import org.apache.ambari.server.orm.entities.ServiceGroupEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.ModuleComponent;
-import org.apache.ambari.server.state.Mpack;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
@@ -100,19 +96,6 @@ public class ExecutionCommandWrapper {
   @Inject
   private Configuration configuration;
 
-  /**
-   * Used to get service groups (and from them mpacks) so that we can set the
-   * right information on the command.
-   */
-  @Inject
-  private ServiceGroupDAO serviceGroupDAO;
-
-  /**
-   * Used for retrieving mpack entities by their ID.
-   */
-  @Inject
-  private MpackDAO mpackDAO;
-
   @AssistedInject
   public ExecutionCommandWrapper(@Assisted String jsonExecutionCommand) {
     this.jsonExecutionCommand = jsonExecutionCommand;
@@ -141,7 +124,7 @@ public ExecutionCommand getExecutionCommand() {
       return executionCommand;
     }
 
-    if (null == jsonExecutionCommand) {
+    if( null == jsonExecutionCommand ){
       throw new RuntimeException(
         "Invalid ExecutionCommandWrapper, both object and string representations are null");
     }
@@ -249,18 +232,28 @@ public ExecutionCommand getExecutionCommand() {
 
       // setting repositoryFile
       final Host host = cluster.getHost(executionCommand.getHostname());  // can be null on internal commands
+      final String serviceName = executionCommand.getServiceName();  // can be null on executing special RU tasks
 
-      if (null == executionCommand.getRepositoryFile() && null != host) {
+      if (null == executionCommand.getRepositoryFile() && null != host && null != serviceName) {
         final CommandRepository commandRepository;
+        final Service service = cluster.getService(serviceName);
+        final String componentName = executionCommand.getComponentName();
 
-        ServiceGroupEntity serviceGroupEntity = serviceGroupDAO.find(clusterId, executionCommand.getServiceGroupName());
-        long mpackId = serviceGroupEntity.getStack().getMpackId();
-        Mpack mpack = ambariMetaInfo.getMpack(mpackId);
-        MpackEntity mpackEntity = mpackDAO.findById(mpackId);
+        try {
 
-        RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(mpackEntity, host);
-        commandRepository = repoVersionHelper.getCommandRepository(mpack, osEntity);
-        executionCommand.setRepositoryFile(commandRepository);
+          if (null != componentName) {
+            ServiceComponent serviceComponent = service.getServiceComponent(componentName);
+            commandRepository = repoVersionHelper.getCommandRepository(null, serviceComponent, host);
+          } else {
+            RepositoryVersionEntity repoVersion = service.getDesiredRepositoryVersion();
+            RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(host, repoVersion);
+            commandRepository = repoVersionHelper.getCommandRepository(repoVersion, osEntity);
+          }
+          executionCommand.setRepositoryFile(commandRepository);
+
+        } catch (SystemException e) {
+          throw new RuntimeException(e);
+        }
       }
 
     } catch (ClusterNotFoundException cnfe) {
@@ -271,7 +264,7 @@ public ExecutionCommand getExecutionCommand() {
         cnfe.getMessage());
 
       return executionCommand;
-    } catch (Exception e) {
+    } catch (AmbariException e) {
       throw new RuntimeException(e);
     }
 
@@ -279,45 +272,44 @@ public ExecutionCommand getExecutionCommand() {
   }
 
   public void setVersions(Cluster cluster) {
-    String serviceGroupName = executionCommand.getServiceGroupName();
+    // set the repository version for the component this command is for -
+    // always use the current desired version
     String serviceName = executionCommand.getServiceName();
-    String componentName = executionCommand.getComponentName();
-
     String serviceType = null;
     try {
-      Mpack mpack = null;
-      StackEntity stackEntity = null;
-
-      if (StringUtils.isNotBlank(serviceGroupName)) {
-        ServiceGroupEntity serviceGroupEntity = serviceGroupDAO.find(cluster.getClusterId(), serviceGroupName);
-        stackEntity = serviceGroupEntity.getStack();
-        mpack = ambariMetaInfo.getMpack(stackEntity.getMpackId());
-      }
-
-      Service service = cluster.getService(serviceGroupName, serviceName);
-      if (null != service) {
-        serviceType = service.getServiceType();
+      RepositoryVersionEntity repositoryVersion = null;
+      if (!StringUtils.isEmpty(serviceName)) {
+        Service service = cluster.getService(serviceName);
+        if (null != service) {
+          serviceType = service.getServiceType();
+          repositoryVersion = service.getDesiredRepositoryVersion();
+
+          String componentName = executionCommand.getComponentName();
+          if (!StringUtils.isEmpty(componentName)) {
+            ServiceComponent serviceComponent = service.getServiceComponent(componentName);
+            if (null != serviceComponent) {
+              repositoryVersion = serviceComponent.getDesiredRepositoryVersion();
+            }
+          }
+        }
       }
 
-      ModuleComponent moduleComponent = null;
       Map<String, String> commandParams = executionCommand.getCommandParams();
-      if (null != mpack && StringUtils.isNotBlank(serviceName) && StringUtils.isNotBlank(componentName)) {
+
+      if (null != repositoryVersion) {
         // only set the version if it's not set and this is NOT an install
         // command
-
-        moduleComponent = mpack.getModuleComponent(serviceName, componentName);
-      }
-
-      if (null != moduleComponent) {
+        // Some stack scripts use version for path purposes. Sending unresolved version first (for
+        // blueprints) and then resolved one would result in various issues: duplicate directories
+        // (/hdp/apps/2.6.3.0 + /hdp/apps/2.6.3.0-235), parent directory not found, and file not
+        // found, etc. Hence requiring repositoryVersion to be resolved.
         if (!commandParams.containsKey(VERSION)
-          && executionCommand.getRoleCommand() != RoleCommand.INSTALL) {
-          commandParams.put(VERSION, moduleComponent.getVersion());
+            && repositoryVersion.isResolved()
+            && executionCommand.getRoleCommand() != RoleCommand.INSTALL) {
+          commandParams.put(VERSION, repositoryVersion.getVersion());
         }
-      }
 
-      if (null != stackEntity) {
-        StackId stackId = new StackId(stackEntity);
+        StackId stackId = repositoryVersion.getStackId();
         StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
           stackId.getStackVersion());
@@ -335,7 +327,6 @@ public void setVersions(Cluster cluster) {
       }
     }
 
-
     // set the desired versions of versionable components. This is safe even during an upgrade because
     // we are "loading-late": components that have not yet upgraded in an EU will have the correct versions.
     executionCommand.setComponentVersions(cluster);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandRepository.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandRepository.java
index 42b1e8ddcc1..ecb5a214b8d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandRepository.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandRepository.java
@@ -40,14 +40,14 @@ public class CommandRepository {
   @SerializedName("repositories")
   private List<Repository> m_repositories = new ArrayList<>();
 
-  @SerializedName("mpackId")
-  private long m_mpackId;
+  @SerializedName("repoVersion")
+  private String m_repoVersion;
 
-  @SerializedName("mpackName")
-  private String m_mpackName;
+  @SerializedName("repoVersionId")
+  private long m_repoVersionId;
 
-  @SerializedName("mpackVersion")
-  private String m_mpackVersion;
+  @SerializedName("stackName")
+  private String m_stackName;
 
   @SerializedName("repoFileName")
   private String m_repoFileName;
@@ -69,30 +69,27 @@ public CommandRepositoryFeature getFeature(){
    * version after distribution.
    */
   @SerializedName("resolved")
-  private final boolean m_resolved = true;
+  private boolean m_resolved;
 
   /**
-   * @param id
-   *          the mpack id
+   * @param version the repo version
    */
-  public void setMpackId(long id) {
-    m_mpackId = id;
+  public void setRepositoryVersion(String version) {
+    m_repoVersion = version;
   }
 
   /**
-   * @param name
-   *          the mpack name
+   * @param id the repository id
   */
-  public void setMpackName(String name) {
-    m_mpackName = name;
+  public void setRepositoryVersionId(long id) {
+    m_repoVersionId = id;
   }
 
   /**
-   * @param version
-   *          the mpack version
+   * @param name the stack name
   */
-  public void setMpackVersion(String mpackVersion) {
-    m_mpackVersion = mpackVersion;
+  public void setStackName(String name) {
+    m_stackName = name;
   }
@@ -158,6 +155,16 @@ public boolean isResolved() {
     return m_resolved;
   }
 
+  /**
+   * Gets whether this repository has had its version resolved.
+   *
+   * @param resolved
+   *          {@code true} to mark this repository as being resolved.
+   */
+  public void setResolved(boolean resolved) {
+    m_resolved = resolved;
+  }
+
   /**
    * Update repository id to be consistent with old format
    *
@@ -180,7 +187,7 @@ public void setLegacyRepoId(String repoVersion){
   @Deprecated
   @Experimental(feature= ExperimentalFeature.PATCH_UPGRADES)
   public void setLegacyRepoFileName(String stackName, String repoVersion) {
-    m_repoFileName = String.format("%s-%s", stackName, repoVersion);
+    this.m_repoFileName = String.format("%s-%s", stackName, repoVersion);
   }
 
   /**
@@ -190,7 +197,7 @@ public void setLegacyRepoFileName(String stackName, String repoVersion) {
    * @param repoVersionId repository version id
   */
   public void setRepoFileName(String stackName, Long repoVersionId) {
-    m_repoFileName = String.format("ambari-%s-%s", stackName.toLowerCase(), repoVersionId.toString());
+    this.m_repoFileName = String.format("ambari-%s-%s", stackName.toLowerCase(), repoVersionId.toString());
   }
 
   /**
@@ -213,11 +220,11 @@ public static class CommandRepositoryFeature {
     private boolean m_isScoped = true;
 
     public void setIsScoped(boolean isScoped){
-      m_isScoped = isScoped;
+      this.m_isScoped = isScoped;
     }
 
     public void setPreInstalled(String isPreInstalled) {
-      m_isPreInstalled = isPreInstalled.equalsIgnoreCase("true");
+      this.m_isPreInstalled = isPreInstalled.equalsIgnoreCase("true");
     }
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index ec23720f994..8f0bd906dd5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -482,6 +482,9 @@ public interface KeyNames {
     String STACK_NAME = "stack_name";
     String SERVICE_TYPE = "service_type";
     String STACK_VERSION = "stack_version";
+    @Deprecated
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
+    String SERVICE_REPO_INFO = "service_repo_info";
     String PACKAGE_LIST = "package_list";
     String JDK_LOCATION = "jdk_location";
     String JAVA_HOME = "java_home";
@@ -498,6 +501,12 @@ public interface KeyNames {
     String ORACLE_JDBC_URL = "oracle_jdbc_url";
     String DB_DRIVER_FILENAME = "db_driver_filename";
     String CLIENTS_TO_UPDATE_CONFIGS = "clientsToUpdateConfigs";
+    /**
+     * Keep for backward compatibility.
+     */
+    @Deprecated
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
+    String REPO_INFO = "repo_info";
     String DB_NAME = "db_name";
     String GLOBAL = "global";
 
@@ -527,11 +536,29 @@ public interface KeyNames {
     String SERVICE_CHECK = "SERVICE_CHECK"; // TODO: is it standard command? maybe add it to RoleCommand enum?
     String CUSTOM_COMMAND = "custom_command";
 
+    /**
+     * The key indicating that the package_version string is available
+     */
+    @Deprecated
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
+    String PACKAGE_VERSION = "package_version";
+
     /**
      * The key indicating that there is an un-finalized upgrade which is suspended.
      */
     String UPGRADE_SUSPENDED = "upgrade_suspended";
 
+    /**
+     * When installing packages, optionally provide the row id the version is
+     * for in order to precisely match response data.
+     * <p/>
+     * The agent will return this value back in its response so the repository
+     * can be looked up and possibly have its version updated.
+     */
+    @Deprecated
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
+    String REPO_VERSION_ID = "repository_version_id";
+
     /**
      * The version of the component to send down with the command. Normally,
      * this is simply the repository version of the component. However, during
@@ -543,15 +570,11 @@ public interface KeyNames {
       comment = "Change this to reflect the component version")
     String VERSION = "version";
 
+
     /**
      * When installing packages, includes what services will be included in the upgrade
      */
     String CLUSTER_VERSION_SUMMARY = "cluster_version_summary";
-
-    /**
-     * The ID of the mpack associated with this command.
-     */
-    String MPACK_ID = "mpack_id";
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index cbd78e1448d..8cae46d6a92 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -500,10 +500,10 @@ protected void processCommandReports(
       }
 
       String newVersion = structuredOutput == null ? null : structuredOutput.version;
-      Long mpackId = structuredOutput == null ? null : structuredOutput.mpackId;
+      Long repoVersionId = structuredOutput == null ? null : structuredOutput.repositoryVersionId;
 
       HostComponentVersionAdvertisedEvent event = new HostComponentVersionAdvertisedEvent(
-        cl, scHost, newVersion);
+        cl, scHost, newVersion, repoVersionId);
 
       versionEventPublisher.publish(event);
     }
@@ -764,8 +764,8 @@ private static class ComponentVersionStructuredOut {
     @SerializedName("direction")
     private Direction upgradeDirection = null;
 
-    @SerializedName(KeyNames.MPACK_ID)
-    private Long mpackId;
+    @SerializedName(KeyNames.REPO_VERSION_ID)
+    private Long repositoryVersionId;
   }
 }
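Reviewer note: this id round-trips through the agent. The server sends repository_version_id down with install commands (KeyNames.REPO_VERSION_ID above), and ComponentVersionStructuredOut reads the same key back out of the command report. A hand-written example of the structured output an agent-side script might report — the key names are from the diff, the values are invented:

    # Invented example of a command report's structured output; the keys
    # 'version' and 'repository_version_id' are the ones deserialized above.
    import json

    structured_output = {
      'version': '2.6.3.0-235',      # version advertised by the component
      'repository_version_id': 51,   # echoed back so the repo_version row can be updated
    }

    print(json.dumps(structured_output))
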
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
index 7c5423ce79a..977dc9e76d3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
@@ -28,16 +28,17 @@
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Alert;
 import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.ModuleComponent;
-import org.apache.ambari.server.state.Mpack;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceGroup;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.commons.lang.StringUtils;
@@ -108,15 +109,17 @@ List<Alert> execute(Cluster cluster, AlertDefinitionEntity myDefinition) throws
     for (Host host : hosts) {
       List<ServiceComponentHost> hostComponents = cluster.getServiceComponentHosts(host.getHostName());
       for (ServiceComponentHost hostComponent : hostComponents) {
-        String serviceGroupName = hostComponent.getServiceGroupName();
-        ServiceGroup serviceGroup = cluster.getServiceGroup(serviceGroupName);
-        Long mpackId = serviceGroup.getMpackId();
-        Mpack mpack = m_metaInfo.getMpack(mpackId);
+        Service service = cluster.getService(hostComponent.getServiceName());
+        ServiceComponent serviceComponent = service.getServiceComponent(hostComponent.getServiceComponentName());
+
+        RepositoryVersionEntity desiredRepositoryVersion = service.getDesiredRepositoryVersion();
+        StackId desiredStackId = serviceComponent.getDesiredStackId();
+        String desiredVersion = desiredRepositoryVersion.getVersion();
 
         final ComponentInfo componentInfo;
         try {
-          componentInfo = m_metaInfo.getComponent(mpack.getName(),
-              mpack.getVersion(), hostComponent.getServiceType(),
+          componentInfo = m_metaInfo.getComponent(desiredStackId.getStackName(),
+              desiredStackId.getStackVersion(), hostComponent.getServiceType(),
               hostComponent.getServiceComponentName());
         } catch (AmbariException ambariException) {
           // throw an UNKNOWN response if we can't load component info
@@ -132,11 +135,8 @@ List<Alert> execute(Cluster cluster, AlertDefinitionEntity myDefinition) throws
           continue;
         }
 
-        ModuleComponent moduleComponent = mpack.getModuleComponent(hostComponent.getServiceName(),
-            hostComponent.getServiceComponentName());
-
         String version = hostComponent.getVersion();
-        if (!StringUtils.equals(version, moduleComponent.getVersion())) {
+        if (!StringUtils.equals(version, desiredVersion)) {
           Set<String> mismatchedComponents = versionMismatches.get(host);
           if (null == mismatchedComponents) {
             mismatchedComponents = new HashSet<>();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
index e34fcb13361..deb13db607a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
@@ -18,8 +18,6 @@
 package org.apache.ambari.server.api.query.render;
 
-import static java.util.stream.Collectors.toList;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -28,12 +26,10 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.query.QueryInfo;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.Request;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
@@ -46,7 +42,6 @@ import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
 import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
-import org.apache.ambari.server.controller.internal.ClusterSettingResourceProvider;
 import org.apache.ambari.server.controller.internal.ExportBlueprintRequest;
 import org.apache.ambari.server.controller.internal.RequestImpl;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
@@ -73,7 +68,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 
 /**
  * Renderer which renders a cluster resource as a blueprint.
@@ -85,12 +80,6 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    */
   private AmbariManagementController controller = AmbariServer.getController();
 
-
-  /**
-   * MetaInfo used to get stack and mpack information.
-   */
-  private AmbariMetaInfo metaInfo = controller.getAmbariMetaInfo();
-
 //  /**
 //   * Map of configuration type to configuration properties which are required that a user
 //   * input. These properties will be stripped from the exported blueprint.
@@ -112,37 +101,47 @@ public TreeNode<Set<String>> finalizeProperties(
 
     copyPropertiesToResult(queryProperties, resultTree);
 
-    ensureChild(resultTree, Resource.Type.Configuration, "properties");
-
-    ensureChild(resultTree, Resource.Type.ClusterSetting);
-
-    TreeNode<Set<String>> serviceGroupNode = ensureChild(resultTree, Resource.Type.ServiceGroup);
-    TreeNode<Set<String>> serviceNode = ensureChild(serviceGroupNode, Resource.Type.Service);
-    ensureChild(serviceNode, Resource.Type.Component,
-      "ServiceComponentInfo/cluster_name",
-      "ServiceComponentInfo/service_name",
-      "ServiceComponentInfo/component_name",
-      "ServiceComponentInfo/recovery_enabled");
-
-    TreeNode<Set<String>> hostNode = ensureChild(resultTree, Resource.Type.Host);
-    ensureChild(hostNode, Resource.Type.HostComponent, "HostRoles/component_name");
-
-    return resultTree;
-  }
+    String configType = Resource.Type.Configuration.name();
+    if (resultTree.getChild(configType) == null) {
+      resultTree.addChild(new HashSet<>(), configType);
+    }
 
-  private TreeNode<Set<String>> ensureChild(TreeNode<Set<String>> parent,
-    Resource.Type resourceType,
-    String... properties) {
-    TreeNode<Set<String>> child = parent.getChild(resourceType.name());
-    if (null == child) {
-      child = parent.addChild(new HashSet<>(), resourceType.name());
+    String serviceType = Resource.Type.Service.name();
+    if (resultTree.getChild(serviceType) == null) {
+      resultTree.addChild(new HashSet<>(), serviceType);
     }
-    for (String property: properties) {
-      child.getObject().add(property);
+    TreeNode<Set<String>> serviceNode = resultTree.getChild(serviceType);
+    if (serviceNode == null) {
+      serviceNode = resultTree.addChild(new HashSet<>(), serviceType);
     }
-    return child;
-  }
 
+    String serviceComponentType = Resource.Type.Component.name();
+    TreeNode<Set<String>> serviceComponentNode = resultTree.getChild(
+        serviceType + "/" + serviceComponentType);
+    if (serviceComponentNode == null) {
+      serviceComponentNode = serviceNode.addChild(new HashSet<>(), serviceComponentType);
+    }
+    serviceComponentNode.getObject().add("ServiceComponentInfo/cluster_name");
+    serviceComponentNode.getObject().add("ServiceComponentInfo/service_name");
+    serviceComponentNode.getObject().add("ServiceComponentInfo/component_name");
+    serviceComponentNode.getObject().add("ServiceComponentInfo/recovery_enabled");
+
+    String hostType = Resource.Type.Host.name();
+    String hostComponentType = Resource.Type.HostComponent.name();
+    TreeNode<Set<String>> hostComponentNode = resultTree.getChild(
+        hostType + "/" + hostComponentType);
+
+    if (hostComponentNode == null) {
+      TreeNode<Set<String>> hostNode = resultTree.getChild(hostType);
+      if (hostNode == null) {
+        hostNode = resultTree.addChild(new HashSet<>(), hostType);
+      }
+      hostComponentNode = hostNode.addChild(new HashSet<>(), hostComponentType);
+    }
+    resultTree.getChild(configType).getObject().add("properties");
+    hostComponentNode.getObject().add("HostRoles/component_name");
+
+    return resultTree;
+  }
 
   @Override
   public Result finalizeResult(Result queryResult) {
@@ -198,11 +197,12 @@ private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
     BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
-    Set<StackId> stackIds = topology.getStackIds();
-    // TODO: mpacks should come from service groups once https://github.com/apache/ambari/pull/234 will be committed
-    Collection<Map<String, String>> mpackInstances = stackIds.stream().
-      map( stackId -> ImmutableMap.of("name", stackId.getStackName(), "version", stackId.getStackVersion())).collect(toList());
-    blueprintResource.setProperty(BlueprintResourceProvider.MPACK_INSTANCES_PROPERTY_ID, mpackInstances);
+    Set<StackId> stackIds = topology.getBlueprint().getStackIds();
+    if (stackIds.size() == 1) {
+      StackId stackId = Iterables.getOnlyElement(stackIds);
+      blueprintResource.setProperty("Blueprints/stack_name", stackId.getStackName());
+      blueprintResource.setProperty("Blueprints/stack_version", stackId.getStackVersion());
+    }
 
     if (topology.isClusterKerberosEnabled()) {
       Map<String, Object> securityConfigMap = new LinkedHashMap<>();
@@ -210,7 +210,8 @@ private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
 
       try {
         String clusterName = topology.getAmbariContext().getClusterName(topology.getClusterId());
-        Map<String, Object> kerberosDescriptor = getKerberosDescriptor(AmbariContext.getClusterController(), clusterName);
+        Map<String, Object> kerberosDescriptor = getKerberosDescriptor(topology.getAmbariContext()
+            .getClusterController(), clusterName);
 
         if (kerberosDescriptor != null) {
           securityConfigMap.put(SecurityConfigurationFactory.KERBEROS_DESCRIPTOR_PROPERTY_ID, kerberosDescriptor);
         }
@@ -237,7 +238,7 @@ private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
   /***
    * Constructs the Settings object of the following form:
    * "settings": [ {
-   "cluster_settings": [
+   "recovery_settings": [
   {
   "recovery_enabled": "true"
   } ] },
@@ -261,8 +262,6 @@ private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
   "recovery_enabled": "true"
   } ] } ]
    *
-   * NOTE: As of 3.0 global recovery settings will move under a new section called cluster_settings.
-   *
    * @param clusterNode
    * @return A Collection<Map<String, Object>> which represents the Setting Object
   */
@@ -272,27 +271,25 @@ private Collection<Map<String, Object>> getSettings(TreeNode<Resource> clusterNo
 
     //Initialize collections to create appropriate json structure
     Collection<Map<String, Object>> blueprintSetting = new ArrayList<>();
+    Set<Map<String, String>> recoverySettingValue = new HashSet<>();
     Set<Map<String, String>> serviceSettingValue = new HashSet<>();
     Set<Map<String, String>> componentSettingValue = new HashSet<>();
+    HashMap<String, String> property = new HashMap<>();
+    HashMap<String, String> componentProperty = new HashMap<>();
+    Boolean globalRecoveryEnabled = false;
 
     //Fetch the services, to obtain ServiceInfo and ServiceComponents
-    // TODO: set mpack instance if needed (multi-mpack case)
-    Collection<TreeNode<Resource>> serviceChildren =
-      clusterNode.getChild("servicegroups").
-        getChildren().stream().flatMap(
-          node -> node.getChild("services").getChildren().stream()).
-          collect(toList());
-
-    HashMap<String, String> serviceProperty;
+    Collection<TreeNode<Resource>> serviceChildren = clusterNode.getChild("services").getChildren();
     for (TreeNode<Resource> serviceNode : serviceChildren) {
       ResourceImpl service = (ResourceImpl) serviceNode.getObject();
       Map<String, Object> ServiceInfoMap = service.getPropertiesMap().get("ServiceInfo");
 
       //service_settings population
-      serviceProperty = new HashMap<>();
+      property = new HashMap<>();
       if (ServiceInfoMap.get("credential_store_enabled").equals("true")) {
-        serviceProperty.put("name", ServiceInfoMap.get("service_name").toString());
-        serviceProperty.put("credential_store_enabled", "true");
+        property.put("name", ServiceInfoMap.get("service_name").toString());
+        property.put("credential_store_enabled", "true");
       }
 
       //Fetch the service Components to obtain ServiceComponentInfo
@@ -302,36 +299,34 @@ private Collection<Map<String, Object>> getSettings(TreeNode<Resource> clusterNo
         Map<String, Object> ServiceComponentInfoMap = component.getPropertiesMap().get("ServiceComponentInfo");
 
         if (ServiceComponentInfoMap.get("recovery_enabled").equals("true")) {
-          serviceProperty.put("name", ServiceInfoMap.get("service_name").toString());
-          serviceProperty.put("recovery_enabled", "true");
+          globalRecoveryEnabled = true;
+          property.put("name", ServiceInfoMap.get("service_name").toString());
+          property.put("recovery_enabled", "true");
 
           //component_settings population
-          HashMap<String, String> componentProperty = new HashMap<>();
+          componentProperty = new HashMap<>();
           componentProperty.put("name", ServiceComponentInfoMap.get("component_name").toString());
           componentProperty.put("recovery_enabled", "true");
-
-          componentSettingValue.add(componentProperty);
         }
       }
-      if (!serviceProperty.isEmpty())
-        serviceSettingValue.add(serviceProperty);
+      if (!property.isEmpty())
+        serviceSettingValue.add(property);
+      if (!componentProperty.isEmpty())
+        componentSettingValue.add(componentProperty);
     }
-
-    // Add cluster settings
-    Set<Map<String, String>> clusterSettings = new HashSet<>();
-    TreeNode<Resource> settingsNode = clusterNode.getChild("settings");
-    if (null != settingsNode) {
-      for (TreeNode<Resource> clusterSettingNode: settingsNode.getChildren()) {
-        Map<String, Object> nodeProperties = clusterSettingNode.getObject().getPropertiesMap().get(ClusterSettingResourceProvider.RESPONSE_KEY);
-        String key = Objects.toString(nodeProperties.get(ClusterSettingResourceProvider.CLUSTER_SETTING_NAME_PROPERTY_ID));
-        String value = Objects.toString(nodeProperties.get(ClusterSettingResourceProvider.CLUSTER_SETTING_VALUE_PROPERTY_ID));
-        clusterSettings.add(ImmutableMap.of(key, value));
-      }
+    //recovery_settings population
+    property = new HashMap<>();
+    if (globalRecoveryEnabled) {
+      property.put("recovery_enabled", "true");
+    } else {
+      property.put("recovery_enabled", "false");
     }
+    recoverySettingValue.add(property);
 
     //Add all the different setting values.
     Map<String, Object> settingMap = new HashMap<>();
-    settingMap.put("cluster_settings", clusterSettings);
+    settingMap.put("recovery_settings", recoverySettingValue);
     blueprintSetting.add(settingMap);
 
     settingMap = new HashMap<>();
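Reviewer note: rendered into an exported blueprint, the getSettings() output restored above produces a structure like the following — a hand-written illustration assembled from the javadoc and code in these hunks (service and component names are invented):

    # Hand-written illustration of the exported "settings" section; the
    # recovery_settings / service_settings / component_settings keys come from
    # the code above, the entries are invented.
    settings = [
      {'recovery_settings': [{'recovery_enabled': 'true'}]},
      {'service_settings': [
        {'name': 'HDFS', 'recovery_enabled': 'true', 'credential_store_enabled': 'true'},
      ]},
      {'component_settings': [
        {'name': 'DATANODE', 'recovery_enabled': 'true'},
      ]},
    ]
    print(settings[0])
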
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/HostSummaryRenderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/HostSummaryRenderer.java
index 82eb796ddbb..3d6b843c082 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/HostSummaryRenderer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/HostSummaryRenderer.java
@@ -22,9 +22,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
-import org.apache.ambari.server.api.query.QueryInfo;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
 import org.apache.ambari.server.api.util.TreeNode;
@@ -64,13 +62,6 @@ public String getProperty() {
     }
   }
 
-  @Override
-  public TreeNode<Set<String>> finalizeProperties(TreeNode<QueryInfo> queryTree, boolean isCollection) {
-    TreeNode<Set<String>> propertiesNode = super.finalizeProperties(queryTree, isCollection);
-    propertiesNode.getObject().add(HostResourceProvider.HOST_OS_TYPE_PROPERTY_ID);
-    return propertiesNode;
-  }
-
   @Override
   public Result finalizeResult(Result queryResult) {
     TreeNode<Resource> queryResultTree = queryResult.getResultTree();
@@ -92,7 +83,7 @@ private void buildFinalizedSummary(TreeNode<Resource> queryResultTree, List<Map
     resource.setProperty(HostResourceProvider.SUMMARY_PROPERTY_ID, summary);
     return result;
   }
-
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/MpackResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/MpackResourceDefinition.java
index fc3d5eb92d5..4fbe5be916e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/MpackResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/MpackResourceDefinition.java
@@ -28,12 +28,17 @@
 import org.apache.ambari.server.controller.internal.ResourceImpl;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.Resource.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Resource Definition for Mpack Resource types.
 */
 public class MpackResourceDefinition extends BaseResourceDefinition {
 
+  private final static Logger LOG =
+      LoggerFactory.getLogger(MpackResourceDefinition.class);
+
   public MpackResourceDefinition(Type resourceType) {
     super(Resource.Type.Mpack);
   }
@@ -56,7 +61,6 @@ public String getSingularName() {
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
     Set<SubResourceDefinition> setChildren = new HashSet<>();
     setChildren.add(new SubResourceDefinition(Resource.Type.StackVersion, null, false));
-    setChildren.add(new SubResourceDefinition(Resource.Type.OperatingSystem, null, true));
     return setChildren;
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemReadOnlyResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemReadOnlyResourceDefinition.java
deleted file mode 100644
index 339f411b012..00000000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemReadOnlyResourceDefinition.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.Resource.Type;
-
-@Deprecated
-@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL)
-public class OperatingSystemReadOnlyResourceDefinition extends BaseResourceDefinition {
-
-  public OperatingSystemReadOnlyResourceDefinition(Type resourceType) {
-    super(resourceType);
-  }
-
-  public OperatingSystemReadOnlyResourceDefinition() {
-    super(Resource.Type.OperatingSystemReadOnly);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "operating_systems";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "operating_system";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(Resource.Type.Repository));
-  }
-
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemResourceDefinition.java
index 14419c2894e..44d228d370e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/OperatingSystemResourceDefinition.java
@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.api.resources;
 
+import java.util.Collections;
+import java.util.Set;
+
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.Resource.Type;
 
@@ -40,4 +43,10 @@ public String getPluralName() {
   public String getSingularName() {
     return "operating_system";
   }
+
+  @Override
+  public Set<SubResourceDefinition> getSubResourceDefinitions() {
+    return Collections.singleton(new SubResourceDefinition(Resource.Type.Repository));
+  }
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinition.java
index 3391f27bdeb..f12269139f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinition.java
@@ -40,7 +40,7 @@ public String getSingularName() {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(Resource.Type.OperatingSystemReadOnly));
+    return Collections.singleton(new SubResourceDefinition(Resource.Type.OperatingSystem));
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
index b73f97ba3b1..7d16105256b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
@@ -258,10 +258,6 @@ public static ResourceDefinition getResourceDefinition(Resource.Type type, Map getSubResourceDefinitions() {
     Set<SubResourceDefinition> children = new HashSet<>();
-    children.add(new SubResourceDefinition(Resource.Type.OperatingSystemReadOnly));
+    children.add(new SubResourceDefinition(Resource.Type.OperatingSystem));
     children.add(new SubResourceDefinition(Resource.Type.StackService));
     children.add(new SubResourceDefinition(Resource.Type.StackLevelConfiguration));
     children.add(new SubResourceDefinition(Resource.Type.RepositoryVersion));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/VersionDefinitionResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/VersionDefinitionResourceDefinition.java
index 1b32ba11dca..135ff4c0937 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/VersionDefinitionResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/VersionDefinitionResourceDefinition.java
@@ -49,7 +49,7 @@ public String getSingularName() {
 
   @Override
   public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(Type.OperatingSystemReadOnly));
+    return Collections.singleton(new SubResourceDefinition(Type.OperatingSystem));
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
index e0aa7f23eb5..d9852d8d911 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
@@ -56,7 +56,6 @@ public class BootStrapResource {
   public static void init(BootStrapImpl instance) {
     bsImpl = instance;
   }
-
   /**
    * Run bootstrap on a list of hosts.
    * @response.representation.200.doc
@@ -69,35 +68,11 @@ public static void init(BootStrapImpl instance) {
   @POST @ApiIgnore // until documented
   @Consumes(MediaType.APPLICATION_JSON)
   @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public BSResponse bootStrap(SshHostInfo sshInfo,
-      @Context UriInfo uriInfo) {
+  public BSResponse bootStrap(SshHostInfo sshInfo, @Context UriInfo uriInfo) {
 
     normalizeHosts(sshInfo);
 
-    BSResponse resp = bsImpl.runBootStrap(sshInfo, false);
-
-    return resp;
-  }
-
-  /**
-   * Run host reachability validation on a list of hosts.
- * @response.representation.200.doc - * - * @response.representation.200.mediaType application/json - * @response.representation.406.doc Error in format - * @response.representation.408.doc Request Timed out - * @throws Exception - */ - @POST @ApiIgnore // until documented - @Path("/validations") - @Consumes(MediaType.APPLICATION_JSON) - @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML}) - public BSResponse hostValidate(SshHostInfo sshInfo, - @Context UriInfo uriInfo) { - - normalizeHosts(sshInfo); - - BSResponse resp = bsImpl.runBootStrap(sshInfo, true); + BSResponse resp = bsImpl.runBootStrap(sshInfo); return resp; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java index baf80f37d35..410903540cb 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java @@ -719,20 +719,13 @@ public Collection getStacks(String stackName) throws AmbariException * @param stackId * the stack id * @return the stack info + * @throws AmbariException */ - public StackInfo getStack(StackId stackId) throws StackAccessException { + public StackInfo getStack(StackId stackId) throws AmbariException { return getStack(stackId.getStackName(), stackId.getStackVersion()); } - public boolean isKnownStack(StackId stackId) { - try { - return getStack(stackId) != null; - } catch (StackAccessException e) { - return false; - } - } - - public StackInfo getStack(String stackName, String version) throws StackAccessException { + public StackInfo getStack(String stackName, String version) throws AmbariException { StackInfo stackInfoResult = stackManager.getStack(stackName, version); if (stackInfoResult == null) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/CompatibleRepositoryVersionService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/CompatibleRepositoryVersionService.java index 44d1e56a777..813013b82d8 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/CompatibleRepositoryVersionService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/CompatibleRepositoryVersionService.java @@ -90,12 +90,12 @@ public Response getRepositoryVersion(@Context HttpHeaders headers, * @return operating systems service */ @Path("{repositoryVersionId}/operating_systems") - public OperatingSystemReadOnlyService getOperatingSystemsHandler(@PathParam("repositoryVersionId") String repositoryVersionId) { + public OperatingSystemService getOperatingSystemsHandler(@PathParam("repositoryVersionId") String repositoryVersionId) { Map mapIds = new HashMap<>(); mapIds.putAll(parentKeyProperties); mapIds.put(Resource.Type.CompatibleRepositoryVersion, repositoryVersionId); - return new OperatingSystemReadOnlyService(mapIds); + return new OperatingSystemService(mapIds); } /** diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java index 503c301dabc..a5e9aec1783 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation 
(ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,34 +35,20 @@ import javax.ws.rs.QueryParam; import javax.ws.rs.core.Context; import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; import org.apache.ambari.annotations.ApiIgnore; import org.apache.ambari.server.api.resources.ResourceInstance; import org.apache.ambari.server.configuration.Configuration; -import org.apache.ambari.server.controller.ServiceComponentHostResponse; import org.apache.ambari.server.controller.spi.Resource; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.Validate; -import org.apache.http.HttpStatus; - -import io.swagger.annotations.Api; -import io.swagger.annotations.ApiImplicitParam; -import io.swagger.annotations.ApiImplicitParams; -import io.swagger.annotations.ApiOperation; -import io.swagger.annotations.ApiResponse; -import io.swagger.annotations.ApiResponses; /** * Service responsible for host_components resource requests. */ -@Api(value = "HostComponents", description = "Endpoint for host component specific operations") public class HostComponentService extends BaseService { - private static final String HOST_COMPONENT_REQUEST_TYPE = "org.apache.ambari.server.controller.ServiceComponentHostRequestSwagger"; - - /** * Parent cluster id. */ @@ -85,33 +71,19 @@ public HostComponentService(String clusterName, String hostName) { } /** - * Handles GET /clusters/{clusterName}/hosts/{hostID}/host_components/{hostComponentID} + * Handles GET /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID} * Get a specific host_component. * * @param headers http headers * @param ui uri info - * @param hostComponentId host_component id - * @return host_component resource representation + * @param hostComponentName host_component id + * @return host_component resource representation */ - @GET - @Path("{hostComponentId}") + @GET @ApiIgnore // until documented + @Path("{hostComponentName}") @Produces("text/plain") - @ApiOperation(value = "Get the details of a given Host Component", - nickname = "HostComponentService#getHostComponent", - notes = "Returns the details of a hostComponent", - response = ServiceComponentHostResponse.ServiceComponentHostResponseSwagger.class, - responseContainer = RESPONSE_CONTAINER_LIST) - @ApiImplicitParams({ - @ApiImplicitParam(name = QUERY_FIELDS, value = QUERY_FILTER_DESCRIPTION, defaultValue = "HostRoles/*", - dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY) - }) - @ApiResponses(value = { - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR) - }) public Response getHostComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("hostComponentId") String hostComponentId, @QueryParam("format") String format) { + @PathParam("hostComponentName") String hostComponentName, @QueryParam("format") String format) { //todo: needs to be refactored when properly handling exceptions if (m_hostName == null) { @@ -121,11 +93,11 @@ public Response getHostComponent(String body, @Context HttpHeaders headers, @Con } if (format != null && format.equals("client_config_tar")) { - return createClientConfigResource(body, headers, ui, hostComponentId); + return 
createClientConfigResource(body, headers, ui, hostComponentName); } return handleRequest(headers, body, ui, Request.Type.GET, - createHostComponentResource(m_clusterName, m_hostName, hostComponentId)); + createHostComponentResource(m_clusterName, m_hostName, hostComponentName)); } /** @@ -136,30 +108,8 @@ public Response getHostComponent(String body, @Context HttpHeaders headers, @Con * @param ui uri info * @return host_component collection resource representation */ - @GET - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Get all Host Components", - nickname = "HostComponentService#getHostComponents", - notes = "Returns all Host Components.", - response = ServiceComponentHostResponse.ServiceComponentHostResponseSwagger.class, - responseContainer = RESPONSE_CONTAINER_LIST) - @ApiImplicitParams({ - @ApiImplicitParam(name = QUERY_FIELDS, value = QUERY_FILTER_DESCRIPTION, - defaultValue = "HostRoles/cluster_name, HostRoles/component_name, HostRoles/host_name, HostRoles/id, " + - "HostRoles/service_group_name, HostRoles/service_name", - dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY), - @ApiImplicitParam(name = QUERY_SORT, value = QUERY_SORT_DESCRIPTION, - defaultValue = "HostRoles/cluster_name.asc, HostRoles/component_name.asc, HostRoles/host_name.asc, " + - "HostRoles/service_group_name.asc, HostRoles/service_name.asc", - dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY), - @ApiImplicitParam(name = QUERY_PAGE_SIZE, value = QUERY_PAGE_SIZE_DESCRIPTION, defaultValue = DEFAULT_PAGE_SIZE, dataType = DATA_TYPE_INT, paramType = PARAM_TYPE_QUERY), - @ApiImplicitParam(name = QUERY_FROM, value = QUERY_FROM_DESCRIPTION, defaultValue = DEFAULT_FROM, dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY), - @ApiImplicitParam(name = QUERY_TO, value = QUERY_TO_DESCRIPTION, dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY) - }) - @ApiResponses(value = { - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR) - }) + @GET @ApiIgnore // until documented + @Produces("text/plain") public Response getHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui, @QueryParam("format") String format) { if (format != null && format.equals("client_config_tar")) { return createClientConfigResource(body, headers, ui, null); @@ -179,30 +129,35 @@ public Response getHostComponents(String body, @Context HttpHeaders headers, @Co * * @return status code only, 201 if successful */ - @POST - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Creates a Host Component", - nickname = "HostComponentService#createHostComponents" - ) - @ApiImplicitParams({ - @ApiImplicitParam(dataType = HOST_COMPONENT_REQUEST_TYPE, paramType = PARAM_TYPE_BODY) - }) - @ApiResponses({ - @ApiResponse(code = HttpStatus.SC_CREATED, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_ACCEPTED, message = MSG_REQUEST_ACCEPTED), - @ApiResponse(code = HttpStatus.SC_BAD_REQUEST, message = MSG_INVALID_ARGUMENTS), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_CONFLICT, message = MSG_RESOURCE_ALREADY_EXISTS), - @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = MSG_NOT_AUTHENTICATED), - @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = MSG_PERMISSION_DENIED), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR), - }) + @POST @ApiIgnore // until documented 
+ @Produces("text/plain") public Response createHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) { return handleRequest(headers, body, ui, Request.Type.POST, createHostComponentResource(m_clusterName, m_hostName, null)); } + /** + * Handles POST /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID} + * Create a specific host_component. + * + * @param body http body + * @param headers http headers + * @param ui uri info + * @param hostComponentName host_component id + * + * @return host_component resource representation + */ + @POST @ApiIgnore // until documented + @Path("{hostComponentName}") + @Produces("text/plain") + public Response createHostComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui, + @PathParam("hostComponentName") String hostComponentName) { + + return handleRequest(headers, body, ui, Request.Type.POST, + createHostComponentResource(m_clusterName, m_hostName, hostComponentName)); + } + /** * Handles PUT /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID} * Updates a specific host_component. @@ -210,33 +165,18 @@ public Response createHostComponents(String body, @Context HttpHeaders headers, * @param body http body * @param headers http headers * @param ui uri info - * @param hostComponentId host_component id + * @param hostComponentName host_component id * * @return information regarding updated host_component */ - @PUT - @Path("{hostComponentId}") - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Updates a given Host Component", - nickname = "HostComponentService#updateHostComponent" - ) - @ApiImplicitParams({ - @ApiImplicitParam(dataType = HOST_COMPONENT_REQUEST_TYPE, paramType = PARAM_TYPE_BODY) - }) - @ApiResponses({ - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_ACCEPTED, message = MSG_REQUEST_ACCEPTED), - @ApiResponse(code = HttpStatus.SC_BAD_REQUEST, message = MSG_INVALID_ARGUMENTS), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = MSG_NOT_AUTHENTICATED), - @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = MSG_PERMISSION_DENIED), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR), - }) + @PUT @ApiIgnore // until documented + @Path("{hostComponentName}") + @Produces("text/plain") public Response updateHostComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("hostComponentId") String hostComponentId) { + @PathParam("hostComponentName") String hostComponentName) { return handleRequest(headers, body, ui, Request.Type.PUT, - createHostComponentResource(m_clusterName, m_hostName, hostComponentId)); + createHostComponentResource(m_clusterName, m_hostName, hostComponentName)); } /** @@ -249,23 +189,8 @@ public Response updateHostComponent(String body, @Context HttpHeaders headers, @ * * @return information regarding updated host_component resources */ - @PUT - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Updates multiple Host Components", - nickname = "HostComponentService#updateHostComponents" - ) - @ApiImplicitParams({ - @ApiImplicitParam(dataType = HOST_COMPONENT_REQUEST_TYPE, paramType = PARAM_TYPE_BODY) - }) - @ApiResponses({ - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_ACCEPTED, message = MSG_REQUEST_ACCEPTED), - @ApiResponse(code = HttpStatus.SC_BAD_REQUEST, 
message = MSG_INVALID_ARGUMENTS), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = MSG_NOT_AUTHENTICATED), - @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = MSG_PERMISSION_DENIED), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR), - }) + @PUT @ApiIgnore // until documented + @Produces("text/plain") public Response updateHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) { return handleRequest(headers, body, ui, Request.Type.PUT, @@ -278,28 +203,18 @@ public Response updateHostComponents(String body, @Context HttpHeaders headers, * * @param headers http headers * @param ui uri info - * @param hostComponentId host_component id + * @param hostComponentName host_component id * * @return host_component resource representation */ @DELETE @ApiIgnore // until documented - @Path("{hostComponentId}") - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Deletes a hostComponent", - nickname = "HostComponentService#deleteHostComponent" - ) - @ApiResponses({ - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = MSG_NOT_AUTHENTICATED), - @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = MSG_PERMISSION_DENIED), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR), - }) + @Path("{hostComponentName}") + @Produces("text/plain") public Response deleteHostComponent(@Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("hostComponentId") String hostComponentId) { + @PathParam("hostComponentName") String hostComponentName) { return handleRequest(headers, null, ui, Request.Type.DELETE, - createHostComponentResource(m_clusterName, m_hostName, hostComponentId)); + createHostComponentResource(m_clusterName, m_hostName, hostComponentName)); } /** @@ -312,45 +227,22 @@ public Response deleteHostComponent(@Context HttpHeaders headers, @Context UriIn * @return host_component resource representation */ @DELETE @ApiIgnore // until documented - @Produces(MediaType.TEXT_PLAIN) - @ApiOperation(value = "Deletes multiple hostComponents", - nickname = "HostComponentService#deleteHostComponents" - ) - @ApiResponses({ - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_UNAUTHORIZED, message = MSG_NOT_AUTHENTICATED), - @ApiResponse(code = HttpStatus.SC_FORBIDDEN, message = MSG_PERMISSION_DENIED), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR), - }) + @Produces("text/plain") public Response deleteHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) { return handleRequest(headers, body, ui, Request.Type.DELETE, createHostComponentResource(m_clusterName, m_hostName, null)); } - @GET - @Produces(MediaType.TEXT_PLAIN) - @Path("{hostComponentId}/processes") - @ApiOperation(value = "Get details of processes.", - nickname = "HostComponentService#getProcesses", - notes = "Returns the details of a host component processes.", - responseContainer = RESPONSE_CONTAINER_LIST) - @ApiImplicitParams({ - @ApiImplicitParam(name = QUERY_FIELDS, value = QUERY_FILTER_DESCRIPTION, defaultValue = "HostRoles/*", - dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY) 
- }) - @ApiResponses(value = { - @ApiResponse(code = HttpStatus.SC_OK, message = MSG_SUCCESSFUL_OPERATION), - @ApiResponse(code = HttpStatus.SC_NOT_FOUND, message = MSG_RESOURCE_NOT_FOUND), - @ApiResponse(code = HttpStatus.SC_INTERNAL_SERVER_ERROR, message = MSG_SERVER_ERROR) - }) + @GET @ApiIgnore // until documented + @Path("{hostComponentName}/processes") + @Produces("text/plain") public Response getProcesses(@Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("hostComponentId") String hostComponentId) { + @PathParam("hostComponentName") String hostComponentName) { Map mapIds = new HashMap<>(); mapIds.put(Resource.Type.Cluster, m_clusterName); mapIds.put(Resource.Type.Host, m_hostName); - mapIds.put(Resource.Type.HostComponent, hostComponentId); + mapIds.put(Resource.Type.HostComponent, hostComponentName); ResourceInstance ri = createResource(Resource.Type.HostComponentProcess, mapIds); @@ -362,25 +254,25 @@ public Response getProcesses(@Context HttpHeaders headers, @Context UriInfo ui, * * @param clusterName cluster name * @param hostName host name - * @param hostComponentId host_component id + * @param hostComponentName host_component name * * @return a host resource instance */ - ResourceInstance createHostComponentResource(String clusterName, String hostName, String hostComponentId) { + ResourceInstance createHostComponentResource(String clusterName, String hostName, String hostComponentName) { Map mapIds = new HashMap<>(); mapIds.put(Resource.Type.Cluster, clusterName); mapIds.put(Resource.Type.Host, hostName); - mapIds.put(Resource.Type.HostComponent, hostComponentId); + mapIds.put(Resource.Type.HostComponent, hostComponentName); return createResource(Resource.Type.HostComponent, mapIds); } private Response createClientConfigResource(String body, HttpHeaders headers, UriInfo ui, - String hostComponentId) { + String hostComponentName) { Map mapIds = new HashMap<>(); mapIds.put(Resource.Type.Cluster, m_clusterName); mapIds.put(Resource.Type.Host, m_hostName); - mapIds.put(Resource.Type.Component, hostComponentId); + mapIds.put(Resource.Type.Component, hostComponentName); Response response = handleRequest(headers, body, ui, Request.Type.GET, createResource(Resource.Type.ClientConfig, mapIds)); @@ -392,10 +284,10 @@ private Response createClientConfigResource(String body, HttpHeaders headers, Ur String filePrefixName; - if (StringUtils.isEmpty(hostComponentId)) { + if (StringUtils.isEmpty(hostComponentName)) { filePrefixName = m_hostName + "(" + Resource.InternalType.Host.toString().toUpperCase()+")"; } else { - filePrefixName = hostComponentId; + filePrefixName = hostComponentName; } Validate.notNull(filePrefixName, "compressed config file name should not be null"); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/MpacksService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/MpacksService.java index dfb90c19bee..ba76fe1748d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/MpacksService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/MpacksService.java @@ -41,7 +41,6 @@ import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; -import io.swagger.annotations.ApiParam; import io.swagger.annotations.ApiResponse; import io.swagger.annotations.ApiResponses; @@ -152,19 +151,6 @@ public Response getMpack(String body, @Context HttpHeaders headers, @Context Uri createMpackResource(id)); } 
- /** - * Handles ANY {id}/operating_systems request - * - * @return operating system service - */ - // TODO: find a way to handle this with Swagger (refactor or custom annotation?) - @Path("{id}/operating_systems") - public OperatingSystemService getOperatingSystemsHandler( - @ApiParam @PathParam("id") String mpackId) { - return new OperatingSystemService(mpackId); - } - - @DELETE @Path("{id}") @Produces(MediaType.TEXT_PLAIN) diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemReadOnlyService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemReadOnlyService.java deleted file mode 100644 index a4f28c722f7..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemReadOnlyService.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.api.services; - -import java.util.HashMap; -import java.util.Map; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.UriInfo; - -import org.apache.ambari.annotations.ApiIgnore; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; -import org.apache.ambari.server.api.resources.ResourceInstance; -import org.apache.ambari.server.controller.spi.Resource; - -/** - * Service responsible for operating systems requests. - */ -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) -public class OperatingSystemReadOnlyService extends BaseService { - - /** - * Extra properties to be inserted into created resource. - */ - private Map parentKeyProperties; - - /** - * Constructor. - * - * @param parentKeyProperties extra properties to be inserted into created resource - */ - public OperatingSystemReadOnlyService(Map parentKeyProperties) { - this.parentKeyProperties = parentKeyProperties; - } - - /** - * Gets all operating systems. - * Handles: GET /operating_systems requests. - * - * @param headers http headers - * @param ui uri info - */ - @GET @ApiIgnore // until documented - @Produces("text/plain") - public Response getOperatingSystems(@Context HttpHeaders headers, @Context UriInfo ui) { - return handleRequest(headers, null, ui, Request.Type.GET, createResource(null)); - } - - /** - * Gets a single operating system. - * Handles: GET /operating_systems/{osType} requests. 
- * - * @param headers http headers - * @param ui uri info - * @param osType os type - * @return information regarding the specified operating system - */ - @GET @ApiIgnore // until documented - @Path("{osType}") - @Produces("text/plain") - public Response getOperatingSystem(@Context HttpHeaders headers, @Context UriInfo ui, @PathParam("osType") String osType) { - return handleRequest(headers, null, ui, Request.Type.GET, createResource(osType)); - } - - /** - * Handles ANY /{osType}/repositories requests. - * - * @param osType the os type - * @return repositories service - */ - @Path("{osType}/repositories") - public RepositoryService getOperatingSystemsHandler(@PathParam("osType") String osType) { - final Map mapIds = new HashMap<>(); - mapIds.putAll(parentKeyProperties); - mapIds.put(Resource.Type.OperatingSystemReadOnly, osType); - return new RepositoryService(mapIds); - } - - /** - * Create an operating system resource instance. - * - * @param osType os type - * - * @return an operating system instance - */ - private ResourceInstance createResource(String osType) { - final Map mapIds = new HashMap<>(); - mapIds.putAll(parentKeyProperties); - mapIds.put(Resource.Type.OperatingSystemReadOnly, osType); - return createResource(Resource.Type.OperatingSystemReadOnly, mapIds); - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemService.java index 87aa3d5d1d8..4a6032ecd53 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/OperatingSystemService.java @@ -20,10 +20,7 @@ import java.util.HashMap; import java.util.Map; -import javax.ws.rs.DELETE; import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; @@ -37,135 +34,77 @@ import org.apache.ambari.server.controller.spi.Resource; /** - * The {@link OperatingSystemService} is a sub resource off of - * {@link MpacksService} which provides the ability to expose and update - * repositories which ship with management packs. + * Service responsible for operating systems requests. */ public class OperatingSystemService extends BaseService { /** - * The parent of each OS resource. + * Extra properties to be inserted into created resource. */ - private final String m_mpackId; + private Map parentKeyProperties; /** * Constructor. * - * @param parentKeyProperties - * extra properties to be inserted into created resource + * @param parentKeyProperties extra properties to be inserted into created resource */ - public OperatingSystemService(String mpackId) { - m_mpackId = mpackId; + public OperatingSystemService(Map parentKeyProperties) { + this.parentKeyProperties = parentKeyProperties; } /** - * Gets all operating systems. Handles: GET /operating_systems requests. + * Gets all operating systems. + * Handles: GET /operating_systems requests. * - * @param headers - * http headers - * @param ui - * uri info + * @param headers http headers + * @param ui uri info */ - @GET - @ApiIgnore + @GET @ApiIgnore // until documented @Produces("text/plain") public Response getOperatingSystems(@Context HttpHeaders headers, @Context UriInfo ui) { return handleRequest(headers, null, ui, Request.Type.GET, createResource(null)); } /** - * Gets a single operating system. 
Handles: GET /operating_systems/{osType} - * requests. + * Gets a single operating system. + * Handles: GET /operating_systems/{osType} requests. * - * @param headers - * http headers - * @param ui - * uri info - * @param osType - * os type + * @param headers http headers + * @param ui uri info + * @param osType os type * @return information regarding the specified operating system */ - @GET - @ApiIgnore + @GET @ApiIgnore // until documented @Path("{osType}") @Produces("text/plain") - public Response getOperatingSystem(@Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("osType") String osType) { + public Response getOperatingSystem(@Context HttpHeaders headers, @Context UriInfo ui, @PathParam("osType") String osType) { return handleRequest(headers, null, ui, Request.Type.GET, createResource(osType)); } /** - * Creates the repositories and properties of a specified operating system. + * Handles ANY /{osType}/repositories requests. * - * @param headers - * http headers - * @param ui - * uri info - * @param osType - * os type - * @return information regarding the specified operating system - */ - @POST - @ApiIgnore - @Path("{osType}") - @Produces("text/plain") - public Response createOperatingSystem(String body, @Context HttpHeaders headers, - @Context UriInfo ui, @PathParam("osType") String osType) { - return handleRequest(headers, body, ui, Request.Type.POST, createResource(osType)); - } - - /** - * Updates the repositories and properties of a specified operating system. - * - * @param headers - * http headers - * @param ui - * uri info - * @param osType - * os type - * @return information regarding the specified operating system - */ - @PUT - @ApiIgnore - @Path("{osType}") - @Produces("text/plain") - public Response updateOperatingSystem(String body, @Context HttpHeaders headers, - @Context UriInfo ui, - @PathParam("osType") String osType) { - return handleRequest(headers, body, ui, Request.Type.PUT, createResource(osType)); - } - - /** - * Removes the specified operating system. - * - * @param headers - * http headers - * @param ui - * uri info - * @param osType - * os type - * @return the delete request status + * @param osType the os type + * @return repositories service */ - @DELETE - @ApiIgnore - @Path("{osType}") - @Produces("text/plain") - public Response deleteOperatingSystem(@Context HttpHeaders headers, @Context UriInfo ui, - @PathParam("osType") String osType) { - return handleRequest(headers, null, ui, Request.Type.DELETE, createResource(osType)); + @Path("{osType}/repositories") + public RepositoryService getOperatingSystemsHandler(@PathParam("osType") String osType) { + final Map mapIds = new HashMap<>(); + mapIds.putAll(parentKeyProperties); + mapIds.put(Resource.Type.OperatingSystem, osType); + return new RepositoryService(mapIds); } /** * Create an operating system resource instance. 
* - * @param osType - * os type + * @param osType os type * * @return an operating system instance */ private ResourceInstance createResource(String osType) { final Map mapIds = new HashMap<>(); - mapIds.put(Resource.Type.Mpack, m_mpackId); + mapIds.putAll(parentKeyProperties); mapIds.put(Resource.Type.OperatingSystem, osType); return createResource(Resource.Type.OperatingSystem, mapIds); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/RepositoryVersionService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/RepositoryVersionService.java index d834e9ee385..c282b7b7b1e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/RepositoryVersionService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/RepositoryVersionService.java @@ -138,11 +138,11 @@ public Response updateRepositoryVersion(String body, @Context HttpHeaders header * @return operating systems service */ @Path("{repositoryVersionId}/operating_systems") - public OperatingSystemReadOnlyService getOperatingSystemsHandler(@PathParam("repositoryVersionId") String repositoryVersionId) { + public OperatingSystemService getOperatingSystemsHandler(@PathParam("repositoryVersionId") String repositoryVersionId) { final Map mapIds = new HashMap<>(); mapIds.putAll(parentKeyProperties); mapIds.put(Resource.Type.RepositoryVersion, repositoryVersionId); - return new OperatingSystemReadOnlyService(mapIds); + return new OperatingSystemService(mapIds); } /** diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/RootClusterSettingService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/RootClusterSettingService.java index bcc5343a97d..d8a37853791 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/RootClusterSettingService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/RootClusterSettingService.java @@ -33,6 +33,7 @@ import org.apache.ambari.server.api.resources.ResourceInstance; import org.apache.ambari.server.controller.ReadOnlyConfigurationResponse; +import org.apache.ambari.server.controller.internal.MpackResourceProvider; import org.apache.ambari.server.controller.internal.RootClusterSettingsResourceProvider; import org.apache.ambari.server.controller.spi.Resource; import org.apache.http.HttpStatus; @@ -68,6 +69,8 @@ public RootClusterSettingService() { @ApiOperation(value = "Returns information for all the read only 'cluster settings'", response = ReadOnlyConfigurationResponse.ReadOnlyConfigurationResponseSwagger.class, responseContainer = RESPONSE_CONTAINER_LIST) @ApiImplicitParams({ + @ApiImplicitParam(name = QUERY_FIELDS, value = QUERY_FILTER_DESCRIPTION, dataType = DATA_TYPE_STRING, + paramType = PARAM_TYPE_QUERY, defaultValue = MpackResourceProvider.MPACK_RESOURCE_ID), @ApiImplicitParam(name = QUERY_SORT, value = QUERY_SORT_DESCRIPTION, dataType = DATA_TYPE_STRING, paramType = PARAM_TYPE_QUERY), @ApiImplicitParam(name = QUERY_PAGE_SIZE, value = QUERY_PAGE_SIZE_DESCRIPTION, defaultValue = DEFAULT_PAGE_SIZE, diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java index 9c5080268b0..4b0208ff312 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/StacksService.java @@ -869,12 +869,12 @@ 
public Response getServiceComponent(String body, @Context HttpHeaders headers, */ // TODO: find a way to handle this with Swagger (refactor or custom annotation?) @Path("{stackName}/versions/{stackVersion}/operating_systems") - public OperatingSystemReadOnlyService getOperatingSystemsHandler(@ApiParam @PathParam("stackName") String stackName, + public OperatingSystemService getOperatingSystemsHandler(@ApiParam @PathParam("stackName") String stackName, @ApiParam @PathParam("stackVersion") String stackVersion) { final Map stackProperties = new HashMap<>(); stackProperties.put(Resource.Type.Stack, stackName); stackProperties.put(Resource.Type.StackVersion, stackVersion); - return new OperatingSystemReadOnlyService(stackProperties); + return new OperatingSystemService(stackProperties); } /** diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/VersionDefinitionService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/VersionDefinitionService.java index c61991be874..9d0f32bb01c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/VersionDefinitionService.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/VersionDefinitionService.java @@ -70,10 +70,10 @@ public Response getService(@Context HttpHeaders headers, @Context UriInfo ui, * @return operating systems service */ @Path("{versionNumber}/operating_systems") - public OperatingSystemReadOnlyService getOperatingSystemsHandler(@PathParam("versionNumber") String versionNumber) { + public OperatingSystemService getOperatingSystemsHandler(@PathParam("versionNumber") String versionNumber) { final Map mapIds = new HashMap<>(); mapIds.put(Resource.Type.VersionDefinition, versionNumber); - return new OperatingSystemReadOnlyService(mapIds); + return new OperatingSystemService(mapIds); } @POST @ApiIgnore // until documented diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java index 87b64a56e81..94ba563ed1f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java @@ -31,6 +31,7 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.ValueAttributesInfo; import org.apache.ambari.server.topology.AdvisedConfiguration; +import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.ConfigRecommendationStrategy; import org.apache.ambari.server.topology.HostGroup; @@ -75,7 +76,7 @@ public static void init(StackAdvisorHelper instance) { * @param userProvidedConfigurations User configurations of cluster provided in Blueprint + Cluster template */ public void adviseConfiguration(ClusterTopology clusterTopology, Map> userProvidedConfigurations) throws ConfigurationTopologyException { - for (StackId stackId : clusterTopology.getStackIds()) { + for (StackId stackId : clusterTopology.getBlueprint().getStackIds()) { StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, stackId, StackAdvisorRequestType.CONFIGURATIONS); try { RecommendationResponse response = stackAdvisorHelper.recommend(request); @@ -95,7 +96,7 @@ private StackAdvisorRequest 
createStackAdvisorRequest(ClusterTopology clusterTop hgHostsMap); return StackAdvisorRequest.StackAdvisorRequestBuilder .forStack(stackId) - .forServices(clusterTopology.getStack().getServices(stackId)) + .forServices(clusterTopology.getBlueprint().getStack().getServices(stackId)) .forHosts(gatherHosts(clusterTopology)) .forHostsGroupBindings(gatherHostGroupBindings(clusterTopology)) .forHostComponents(gatherHostGroupComponents(clusterTopology)) @@ -175,11 +176,12 @@ private void addAdvisedConfigurationsToTopology(RecommendationResponse response, Map recommendedConfigurations = response.getRecommendations().getBlueprint().getConfigurations(); + Blueprint blueprint = topology.getBlueprint(); for (Map.Entry configEntry : recommendedConfigurations.entrySet()) { String configType = configEntry.getKey(); // add recommended config type only if related service is present in Blueprint - if (topology.isValidConfigType(configType)) { + if (blueprint.isValidConfigType(configType)) { BlueprintConfigurations blueprintConfig = filterBlueprintConfig(configType, configEntry.getValue(), userProvidedConfigurations, topology); topology.getAdvisedConfigurations().put(configType, new AdvisedConfiguration( diff --git a/ambari-server/src/main/java/org/apache/ambari/server/audit/request/eventcreator/RepositoryVersionEventCreator.java b/ambari-server/src/main/java/org/apache/ambari/server/audit/request/eventcreator/RepositoryVersionEventCreator.java index 702ea26f30b..476c4e770bb 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/audit/request/eventcreator/RepositoryVersionEventCreator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/audit/request/eventcreator/RepositoryVersionEventCreator.java @@ -33,7 +33,7 @@ import org.apache.ambari.server.audit.event.request.AddRepositoryVersionRequestAuditEvent; import org.apache.ambari.server.audit.event.request.ChangeRepositoryVersionRequestAuditEvent; import org.apache.ambari.server.audit.event.request.DeleteRepositoryVersionRequestAuditEvent; -import org.apache.ambari.server.controller.internal.OperatingSystemReadOnlyResourceProvider; +import org.apache.ambari.server.controller.internal.OperatingSystemResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryVersionResourceProvider; import org.apache.ambari.server.controller.spi.Resource; @@ -160,7 +160,7 @@ private SortedMap>> createResultForOperationSys for (Object entry : set) { if (entry instanceof Map) { Map map = (Map) entry; - String osType = (String) map.get(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); + String osType = (String) map.get(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); if (!result.containsKey(osType)) { result.put(osType, new LinkedList<>()); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java index 4c1da6abbdb..3d1b31cba1a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java @@ -38,8 +38,6 @@ public class BSHostStatus { @XmlElement private String statusCode; @XmlElement - private String error; - @XmlElement private String statusAction; @XmlElement private String log; @@ -76,14 +74,6 @@ public String getStatusCode() { public void setStatusCode(String code) { 
statusCode = code; } - - public String getError() { - return this.error; - } - - public void setError(String error) { - this.error = error; - } public String getStatusAction() { return statusAction; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java index b83e4b9288b..b72ca204d51 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java @@ -98,10 +98,6 @@ public void run() { while (null != (line = reader.readLine())) { if (line.startsWith("tcgetattr:") || line.startsWith("tput:")) continue; - if (line.startsWith("ERROR MESSAGE:") && !status.getStatusCode().equals("0")) { - // Remove "ERROR MESSAGE: " string whose length is 15 - status.setError(line.substring(15)); - } if (0 != sb.length() || 0 == line.length()) sb.append('\n'); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java index 2f7f02f5a90..c7976ee5bb4 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java @@ -44,8 +44,6 @@ class BSRunner extends Thread { private static final String DEFAULT_USER = "root"; private static final String DEFAULT_SSHPORT = "22"; - private static final String VALIDATION_OPERATION = "Validation"; - private static final String BOOTSTRAP_OPERATION = "Bootstrap"; private boolean finished = false; private SshHostInfo sshHostInfo; @@ -63,12 +61,11 @@ class BSRunner extends Thread { private final String clusterOsFamily; private String projectVersion; private int serverPort; - private boolean validationInstance; public BSRunner(BootStrapImpl impl, SshHostInfo sshHostInfo, String bootDir, String bsScript, String agentSetupScript, String agentSetupPassword, int requestId, long timeout, String hostName, boolean isVerbose, String clusterOsFamily, - String projectVersion, int serverPort, boolean validationInstance) + String projectVersion, int serverPort) { this.requestId = requestId; this.sshHostInfo = sshHostInfo; @@ -88,8 +85,6 @@ public BSRunner(BootStrapImpl impl, SshHostInfo sshHostInfo, String bootDir, status.setLog("RUNNING"); status.setStatus(BSStat.RUNNING); bsImpl.updateStatus(requestId, status); - // If validationInstance is true, BSRunner is to validate host reachability only - this.validationInstance = validationInstance; } /** @@ -182,14 +177,6 @@ private long calculateBSTimeout(int hostCount) { return Math.max(HOST_BS_TIMEOUT, HOST_BS_TIMEOUT * hostCount / PARALLEL_BS_COUNT); } - /** - * Get operation name which this runner is for - * @return operation name - */ - private String getBsOperationType() { - return validationInstance ? 
VALIDATION_OPERATION : BOOTSTRAP_OPERATION; - } - public synchronized void finished() { this.finished = true; } @@ -215,9 +202,9 @@ public void run() { sshPort = DEFAULT_SSHPORT; } - String command[] = new String[14]; + String command[] = new String[13]; BSStat stat = BSStat.RUNNING; - StringBuilder scriptlog = new StringBuilder(); + String scriptlog = ""; try { createRunDir(); handle = scheduler.scheduleWithFixedDelay(statusCollector, @@ -255,7 +242,6 @@ public void run() { command[10] = this.serverPort+""; command[11] = userRunAs; command[12] = (this.passwordFile==null) ? "null" : this.passwordFile.toString(); - command[13] = String.valueOf(this.validationInstance); Map envVariables = new HashMap<>(); @@ -275,7 +261,7 @@ public void run() { requestIdDir + " user=" + user + " sshPort=" + sshPort + " keyfile=" + this.sshKeyFile + " passwordFile " + this.passwordFile + " server=" + this.ambariHostname + " version=" + projectVersion + " serverPort=" + this.serverPort + " userRunAs=" + userRunAs + - " timeout=" + bootstrapTimeout / 1000 + " validation=" + validationInstance); + " timeout=" + bootstrapTimeout / 1000); envVariables.put("AMBARI_PASSPHRASE", agentSetupPassword); if (this.verbose) @@ -296,17 +282,17 @@ public void run() { Process process = pb.start(); - StringBuilder logInfoMessage = new StringBuilder(getBsOperationType()); try { - logInfoMessage.append(" output, log=").append(bootStrapErrorFilePath).append(" ").append(bootStrapOutputFilePath).append(" at ").append(ambariHostname); - LOG.info(logInfoMessage.toString()); + String logInfoMessage = "Bootstrap output, log=" + + bootStrapErrorFilePath + " " + bootStrapOutputFilePath + " at " + this.ambariHostname; + LOG.info(logInfoMessage); int exitCode = 1; boolean timedOut = false; if (waitForProcessTermination(process, bootstrapTimeout)){ exitCode = process.exitValue(); } else { - LOG.warn(logInfoMessage.append(" process timed out. It will be destroyed.").toString()); + LOG.warn("Bootstrap process timed out. It will be destroyed."); process.destroy(); timedOut = true; } @@ -319,14 +305,14 @@ public void run() { } catch(IOException io) { LOG.info("Error in reading files ", io); } - scriptlog.append(outMesg).append("\n\n").append(errMesg); + scriptlog = outMesg + "\n\n" + errMesg; if (timedOut) { - scriptlog.append("\n\n ").append(getBsOperationType()).append(" process timed out. It was destroyed."); + scriptlog += "\n\n Bootstrap process timed out. 
It was destroyed."; } - LOG.info("Script log Mesg " + scriptlog.toString()); + LOG.info("Script log Mesg " + scriptlog); if (exitCode != 0) { stat = BSStat.ERROR; - interuptSetupAgent(99, scriptlog.toString()); + interuptSetupAgent(99, scriptlog); } else { stat = BSStat.SUCCESS; } @@ -373,10 +359,8 @@ public void run() { if (handle != null) { handle.cancel(true); } - if (!validationInstance) { - /* schedule a last update */ - scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS); - } + /* schedule a last update */ + scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS); scheduler.shutdownNow(); try { scheduler.awaitTermination(10, TimeUnit.SECONDS); @@ -408,7 +392,7 @@ public void run() { // creating new status instance to avoid modifying exposed object BootStrapStatus newStat = new BootStrapStatus(); newStat.setHostsStatus(hostStatusList); - newStat.setLog(scriptlog.toString()); + newStat.setLog(scriptlog); newStat.setStatus(stat); // Remove private ssh key after bootstrap is complete diff --git a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java index b2b94ef0818..d3f683816b1 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java @@ -101,13 +101,7 @@ public synchronized void init() throws IOException { } } - /** - * Start Bsrunner thread to run bootstrap script - * @param info credential info needed to connect hosts - * @param validate if it is true, bootstrap script will only validate reachability of hosts, not run actual bootstrap - * @return - */ - public synchronized BSResponse runBootStrap(SshHostInfo info, boolean validate) { + public synchronized BSResponse runBootStrap(SshHostInfo info) { BSResponse response = new BSResponse(); /* Run some checks for ssh host */ LOG.info("BootStrapping hosts " + info.hostListAsString()); @@ -133,7 +127,7 @@ public synchronized BSResponse runBootStrap(SshHostInfo info, boolean validate) } else { bsRunner = new BSRunner(this, info, bootStrapDir.toString(), bootScript, bootSetupAgentScript, bootSetupAgentPassword, requestId, 0L, - this.masterHostname, info.isVerbose(), this.clusterOsFamily, this.projectVersion, this.serverPort, validate); + this.masterHostname, info.isVerbose(), this.clusterOsFamily, this.projectVersion, this.serverPort); bsRunner.start(); response.setStatus(BSRunStat.OK); response.setLog("Running Bootstrap now."); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/AtlasPresenceCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/AtlasPresenceCheck.java new file mode 100644 index 00000000000..035116abaf4 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/AtlasPresenceCheck.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks if Atlas service is present. Upgrade to stack HDP 2.5 from previous stack + * must first delete Atlas from the cluster. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.DEFAULT) +public class AtlasPresenceCheck extends AbstractCheckDescriptor{ + + private static final String serviceName = "ATLAS"; + + public AtlasPresenceCheck(){ + super(CheckDescription.ATLAS_SERVICE_PRESENCE_CHECK); + } + + /** + * {@inheritDoc} + */ + @Override + public Set getApplicableServices() { + return Sets.newHashSet(serviceName); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + prerequisiteCheck.getFailedOn().add(serviceName); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java index d1573578554..20bb1b00e4d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java @@ -88,6 +88,19 @@ public class CheckDescription { .put(AbstractCheckDescriptor.DEFAULT, "The following hosts must have version {{version}} installed: {{fails}}.").build()); + public static CheckDescription SECONDARY_NAMENODE_MUST_BE_DELETED = new CheckDescription("SECONDARY_NAMENODE_MUST_BE_DELETED", + PrereqCheckType.HOST, + "The SNameNode component must be deleted from all hosts", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, "The SNameNode component must be deleted from host: %s.").build()); + + public static CheckDescription SERVICES_HIVE_MULTIPLE_METASTORES = new CheckDescription("SERVICES_HIVE_MULTIPLE_METASTORES", + PrereqCheckType.SERVICE, + "Hive Metastore Availability", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "Multiple Hive Metastore instances are recommended for Rolling Upgrade. 
This ensures that there is at least one Metastore running during the upgrade process.").build()); + public static CheckDescription SERVICES_MAINTENANCE_MODE = new CheckDescription("SERVICES_MAINTENANCE_MODE", PrereqCheckType.SERVICE, "No services can be in Maintenance Mode", @@ -95,6 +108,46 @@ public class CheckDescription { .put(AbstractCheckDescriptor.DEFAULT, "The following Services must not be in Maintenance Mode: {{fails}}.").build()); + public static CheckDescription SERVICES_MR_DISTRIBUTED_CACHE = new CheckDescription("SERVICES_MR_DISTRIBUTED_CACHE", + PrereqCheckType.SERVICE, + "MapReduce should reference Hadoop libraries from the distributed cache in HDFS", + new ImmutableMap.Builder() + .put(ServicesMapReduceDistributedCacheCheck.KEY_APP_CLASSPATH, + "The mapred-site.xml property mapreduce.application.classpath should be set.") + .put(ServicesMapReduceDistributedCacheCheck.KEY_FRAMEWORK_PATH, + "The mapred-site.xml property mapreduce.application.framework.path should be set.") + .put(ServicesMapReduceDistributedCacheCheck.KEY_NOT_DFS, + "The mapred-site.xml property mapreduce.application.framework.path or the core-site.xml property fs.defaultFS should point to *dfs:/ url.").build()); + + public static CheckDescription SERVICES_NAMENODE_HA = new CheckDescription("SERVICES_NAMENODE_HA", + PrereqCheckType.SERVICE, + "NameNode High Availability must be enabled", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "NameNode High Availability is not enabled. Verify that dfs.internal.nameservices property is present in hdfs-site.xml.").build()); + + public static CheckDescription SERVICES_NAMENODE_TRUNCATE = new CheckDescription("SERVICES_NAMENODE_TRUNCATE", + PrereqCheckType.SERVICE, + "NameNode Truncate must not be allowed", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "NameNode Truncate is allowed. Verify that dfs.allow.truncate is set to 'false' in hdfs-site.xml.").build()); + + public static CheckDescription SERVICES_TEZ_DISTRIBUTED_CACHE = new CheckDescription("SERVICES_TEZ_DISTRIBUTED_CACHE", + PrereqCheckType.SERVICE, + "Tez should reference Hadoop libraries from the distributed cache in HDFS", + new ImmutableMap.Builder() + .put(ServicesTezDistributedCacheCheck.KEY_LIB_URI_MISSING, + "The tez-site.xml property tez.lib.uris should be set.") + .put(ServicesTezDistributedCacheCheck.KEY_USE_HADOOP_LIBS, + "The tez-site.xml property tez.use.cluster-hadoop-libs should be set.") + .put(ServicesTezDistributedCacheCheck.KEY_LIB_NOT_DFS, + "The tez-site.xml property tez.lib.uris or the core-site.xml property fs.defaultFS should point to *dfs:/ url.") + .put(ServicesTezDistributedCacheCheck.KEY_LIB_NOT_TARGZ, + "The tez-site.xml property tez.lib.uris should point to tar.gz file.") + .put(ServicesTezDistributedCacheCheck.KEY_USE_HADOOP_LIBS_FALSE, + "The tez-site.xml property tez.use.cluster.hadoop-libs should be set to false.").build()); + public static CheckDescription SERVICES_UP = new CheckDescription("SERVICES_UP", PrereqCheckType.SERVICE, "All services must be started", @@ -123,6 +176,49 @@ public class CheckDescription { .put(AbstractCheckDescriptor.DEFAULT, "Re-run Install Packages before starting upgrade").build()); + public static CheckDescription SERVICES_YARN_WP = new CheckDescription("SERVICES_YARN_WP", + PrereqCheckType.SERVICE, + "YARN work preserving restart should be enabled", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "YARN should have work preserving restart enabled. 
The yarn-site.xml property yarn.resourcemanager.work-preserving-recovery.enabled property should be set to true.").build()); + + public static CheckDescription SERVICES_YARN_RM_HA = new CheckDescription("SERVICES_YARN_RM_HA", + PrereqCheckType.SERVICE, + "YARN ResourceManager High Availability is not enabled.", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "YARN ResourceManager HA should be enabled to prevent a disruption in service during the upgrade").build()); + + public static CheckDescription SERVICES_YARN_TIMELINE_ST = new CheckDescription("SERVICES_YARN_TIMELINE_ST", + PrereqCheckType.SERVICE, + "YARN Timeline state preserving restart should be enabled", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "YARN should have state preserving restart enabled for the Timeline server. The yarn-site.xml property yarn.timeline-service.recovery.enabled should be set to true.").build()); + + public static CheckDescription SERVICES_MR2_JOBHISTORY_ST = new CheckDescription("SERVICES_MR2_JOBHISTORY_ST", + PrereqCheckType.SERVICE, + "MapReduce2 JobHistory recovery should be enabled", + new ImmutableMap.Builder() + .put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_ENABLE_KEY, + "MapReduce2 should have recovery enabled for the JobHistory server. The mapred-site.xml property mapreduce.jobhistory.recovery.enable should be set to true.") + .put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_KEY, + "MapReduce2 should have recovery enabled for the JobHistory server. The mapred-site.xml property mapreduce.jobhistory.recovery.store.class should be set to org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService.") + .put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY, + "MapReduce2 should have recovery enabled for the JobHistory server. The mapred-site.xml property mapreduce.jobhistory.recovery.store.leveldb.path should be set. 
Please note that \"mapreduce.jobhistory.recovery.store.leveldb.path\" should be on a mount with ~3 GB of free space.").build()); + + public static CheckDescription SERVICES_HIVE_DYNAMIC_SERVICE_DISCOVERY = new CheckDescription("SERVICES_HIVE_DYNAMIC_SERVICE_DISCOVERY", + PrereqCheckType.SERVICE, + "Hive Dynamic Service Discovery", + new ImmutableMap.Builder() + .put(HiveDynamicServiceDiscoveryCheck.HIVE_DYNAMIC_SERVICE_DISCOVERY_ENABLED_KEY, + "The hive-site.xml property hive.server2.support.dynamic.service.discovery should be set to true.") + .put(HiveDynamicServiceDiscoveryCheck.HIVE_DYNAMIC_SERVICE_ZK_QUORUM_KEY, + "The hive-site.xml property hive.zookeeper.quorum should be set to a comma-separate list of ZooKeeper hosts:port pairs.") + .put(HiveDynamicServiceDiscoveryCheck.HIVE_DYNAMIC_SERVICE_ZK_NAMESPACE_KEY, + "The hive-site.xml property hive.server2.zookeeper.namespace should be set to the value for the root namespace on ZooKeeper.").build()); + public static CheckDescription CONFIG_MERGE = new CheckDescription("CONFIG_MERGE", PrereqCheckType.CLUSTER, "Configuration Merge Check", @@ -145,6 +241,34 @@ public class CheckDescription { .put(AbstractCheckDescriptor.DEFAULT, "There are components which are not reporting the expected stack version: \n%s").build()); + public static CheckDescription SERVICES_RANGER_PASSWORD_VERIFY = new CheckDescription("SERVICES_RANGER_PASSWORD_VERIFY", + PrereqCheckType.SERVICE, + "Verify Ambari and Ranger Password Synchronization", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "There was a problem verifying Ranger and Ambari users") + .put(RangerPasswordCheck.KEY_RANGER_PASSWORD_MISMATCH, + "Credentials for user '%s' in Ambari do not match Ranger.") + .put(RangerPasswordCheck.KEY_RANGER_UNKNOWN_RESPONSE, + "Could not verify credentials for user '%s'. Response code %s received from %s") + .put(RangerPasswordCheck.KEY_RANGER_COULD_NOT_ACCESS, + "Could not access Ranger to verify user '%s' against %s. %s") + .put(RangerPasswordCheck.KEY_RANGER_USERS_ELEMENT_MISSING, + "The response from Ranger received, but there is no users element. Request: %s") + .put(RangerPasswordCheck.KEY_RANGER_OTHER_ISSUE, + "The response from Ranger was malformed. %s. Request: %s") + .put(RangerPasswordCheck.KEY_RANGER_CONFIG_MISSING, + "Could not check credentials. Missing property %s/%s").build()); + + public static CheckDescription ATLAS_SERVICE_PRESENCE_CHECK = new CheckDescription("ATLAS_SERVICE_PRESENCE_CHECK", + PrereqCheckType.SERVICE, + "Atlas Is Not Supported For Upgrades", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "The Atlas service is currently installed on the cluster. " + + "This service does not support upgrades and must be removed before the upgrade can continue. " + + "After upgrading, Atlas can be reinstalled").build()); + public static CheckDescription SERVICE_PRESENCE_CHECK = new CheckDescription("SERVICE_PRESENCE_CHECK", PrereqCheckType.SERVICE, "Service Is Not Supported For Upgrades", @@ -158,6 +282,35 @@ public class CheckDescription { "This service is removed from the new release and must be removed before the upgrade can continue. 
" + "After upgrading, %s can be installed").build()); + public static CheckDescription RANGER_SERVICE_AUDIT_DB_CHECK = new CheckDescription("RANGER_SERVICE_AUDIT_DB_CHECK", + PrereqCheckType.SERVICE, + "Remove the Ranger Audit to Database Capability", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "After upgrading, Ranger will no longer support the Audit to Database feature. Instead, Ranger will audit to Solr. " + + "To migrate the existing audit logs to Solr, follow the steps in Apache Ranger documention for 0.6 release.").build()); + + public static CheckDescription KAFKA_KERBEROS_CHECK = new CheckDescription("KAFKA_KERBEROS_CHECK", + PrereqCheckType.SERVICE, + "Kafka upgrade on Kerberized cluster", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "Kafka is currently not Kerberized, but your cluster is. After upgrading, Kafka will automatically be Kerberized for you.").build()); + + public static CheckDescription SERVICES_HIVE_ROLLING_WARNING = new CheckDescription("SERVICES_HIVE_ROLLING_WARNING", + PrereqCheckType.SERVICE, + "HiveServer2 Downtime", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "HiveServer2 does not currently support rolling upgrades. HiveServer2 will be upgraded, however existing queries which have not completed will fail and need to be resubmitted after HiveServer2 has been upgraded.").build()); + + public static CheckDescription SERVICES_STORM_ROLLING_WARNING = new CheckDescription("SERVICES_STORM_ROLLING_WARNING", + PrereqCheckType.SERVICE, + "Storm Downtime During Upgrade", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "Storm does not support rolling upgrades on this version of the stack. If you proceed, you will be required to stop all running topologies before Storm is restarted.").build()); + public static CheckDescription AUTO_START_DISABLED = new CheckDescription("AUTO_START_DISABLED", PrereqCheckType.CLUSTER, "Auto-Start Disabled Check", @@ -166,6 +319,13 @@ public class CheckDescription { "Auto Start must be disabled before performing an Upgrade. To disable Auto Start, navigate to " + "Admin > Service Auto Start. Turn the toggle switch off to Disabled and hit Save.").build()); + public static CheckDescription RANGER_SSL_CONFIG_CHECK = new CheckDescription("RANGER_SSL_CONFIG_CHECK", + PrereqCheckType.SERVICE, + "Change Ranger SSL configuration path for Keystore and Truststore.", + new ImmutableMap.Builder() + .put(AbstractCheckDescriptor.DEFAULT, + "As Ranger is SSL enabled, Ranger SSL configurations will need to be changed from default value of /etc/ranger/*/conf folder to /etc/ranger/security. " + + "Since the certificates/keystores/truststores in this path may affect the upgrade/downgrade process, it is recommended to manually move the certificates/keystores/truststores out of the conf folders and change the appropriate config values before proceeding.").build()); public static CheckDescription LZO_CONFIG_CHECK = new CheckDescription("LZO_CONFIG_CHECK", PrereqCheckType.CLUSTER, @@ -190,13 +350,25 @@ public class CheckDescription { .put(AbstractCheckDescriptor.DEFAULT, "The following components do not exist in the target repository's stack. 
They must be removed from the cluster before upgrading.") .build()); + public static CheckDescription DRUID_HA_WARNING = new CheckDescription( + "DRUID_HA", + PrereqCheckType.SERVICE, + "Druid Downtime During Upgrade", + new ImmutableMap.Builder<String, String>() + .put( + AbstractCheckDescriptor.DEFAULT, + "High Availability is not enabled for Druid. Druid Service may have some downtime during upgrade. Deploy multiple instances of %s in the Cluster to avoid any downtime." + ) + .build() + ); + + public static CheckDescription VALID_SERVICES_INCLUDED_IN_REPOSITORY = new CheckDescription("VALID_SERVICES_INCLUDED_IN_REPOSITORY", PrereqCheckType.CLUSTER, "The repository is missing services which are required", new ImmutableMap.Builder<String, String>() .put(AbstractCheckDescriptor.DEFAULT, "The following services are included in the upgrade but the repository is missing their dependencies:\n%s").build()); - + private String m_name; private PrereqCheckType m_type; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheck.java new file mode 100644 index 00000000000..28a3f298e84 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheck.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.ServiceComponentNotFoundException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.commons.lang.StringUtils; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that each Druid component is deployed on more than one host, so that Druid remains available during the upgrade. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.MULTIPLE_COMPONENT_WARNING, order = 16.0f) +public class DruidHighAvailabilityCheck extends AbstractCheckDescriptor +{ + + public static final String DRUID_SERVICE_NAME = "DRUID"; + public static final String[] DRUID_COMPONENT_NAMES = new String[]{ + "DRUID_BROKER", + "DRUID_COORDINATOR", + "DRUID_HISTORICAL", + "DRUID_OVERLORD", + "DRUID_MIDDLEMANAGER", + "DRUID_ROUTER" + }; + + /** + * Constructor. 
+ */ + public DruidHighAvailabilityCheck() + { + super(CheckDescription.DRUID_HA_WARNING); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() + { + return Sets.newHashSet(DRUID_SERVICE_NAME); + } + + /** + * {@inheritDoc} + */ + @Override + public List<CheckQualification> getQualifications() + { + return Arrays.asList( + new PriorCheckQualification(CheckDescription.DRUID_HA_WARNING)); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException + { + List<String> haNotEnabledComponents = Lists.newArrayList(); + for (String component : DRUID_COMPONENT_NAMES) { + Set<String> hosts = getHostsForComponent(request, component); + if (hosts.size() == 1) { + // This component is installed on only 1 host, HA is not enabled for it. + haNotEnabledComponents.add(component); + } + } + if (!haNotEnabledComponents.isEmpty()) { + prerequisiteCheck.getFailedOn().add(DRUID_SERVICE_NAME); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + String failReason = getFailReason(prerequisiteCheck, request); + prerequisiteCheck.setFailReason(String.format(failReason, StringUtils.join(haNotEnabledComponents.toArray(), ", "))); + } + + } + + private Set<String> getHostsForComponent(PrereqCheckRequest request, String componentName) + throws AmbariException + { + Set<String> hosts = new HashSet<>(); + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + try { + ServiceComponent serviceComponent = cluster.getService(DRUID_SERVICE_NAME).getServiceComponent(componentName); + if (serviceComponent != null) { + hosts = serviceComponent.getServiceComponentHosts().keySet(); + } + } + catch (ServiceComponentNotFoundException err) { + // This exception can be ignored if the component doesn't exist because it is a best-attempt at finding it. + } + + return hosts; + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheck.java new file mode 100644 index 00000000000..c631013cf52 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheck.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; +import org.apache.ambari.server.utils.VersionUtils; +import org.apache.commons.lang.StringUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link HiveDynamicServiceDiscoveryCheck} class is used to check that HIVE + * is properly configured for dynamic discovery. + */ +@Singleton +@UpgradeCheck( + group = UpgradeCheckGroup.DEFAULT, + order = 20.0f, + required = { UpgradeType.ROLLING, UpgradeType.EXPRESS }) +public class HiveDynamicServiceDiscoveryCheck extends AbstractCheckDescriptor { + + static final String HIVE_DYNAMIC_SERVICE_DISCOVERY_ENABLED_KEY = "hive.dynamic-service.discovery.enabled.key"; + static final String HIVE_DYNAMIC_SERVICE_ZK_QUORUM_KEY = "hive.dynamic-service.discovery.zk-quorum.key"; + static final String HIVE_DYNAMIC_SERVICE_ZK_NAMESPACE_KEY = "hive.dynamic-service.zk-namespace.key"; + static final String MIN_FAILURE_STACK_VERSION_PROPERTY_NAME = "min-failure-stack-version"; + + /** + * Constructor. + */ + public HiveDynamicServiceDiscoveryCheck() { + super(CheckDescription.SERVICES_HIVE_DYNAMIC_SERVICE_DISCOVERY); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("HIVE"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + List<String> errorMessages = new ArrayList<>(); + + String dynamicServiceDiscoveryEnabled = getProperty(request, "hive-site", "hive.server2.support.dynamic.service.discovery"); + String zookeeperQuorum = getProperty(request, "hive-site", "hive.zookeeper.quorum"); + String zookeeperNamespace = getProperty(request, "hive-site", "hive.server2.zookeeper.namespace"); + + if (null == dynamicServiceDiscoveryEnabled || !Boolean.parseBoolean(dynamicServiceDiscoveryEnabled)) { + errorMessages.add(getFailReason(HIVE_DYNAMIC_SERVICE_DISCOVERY_ENABLED_KEY, prerequisiteCheck, request)); + } + + if (StringUtils.isBlank(zookeeperQuorum)) { + errorMessages.add(getFailReason(HIVE_DYNAMIC_SERVICE_ZK_QUORUM_KEY, prerequisiteCheck, + request)); + } + + if (StringUtils.isBlank(zookeeperNamespace)) { + errorMessages.add(getFailReason(HIVE_DYNAMIC_SERVICE_ZK_NAMESPACE_KEY, prerequisiteCheck, + request)); + } + + String minFailureStackVersion = null; + PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig(); + Map<String, String> checkProperties = null; + if(prerequisiteCheckConfig != null) { + checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName()); + } + if(checkProperties != null && checkProperties.containsKey(MIN_FAILURE_STACK_VERSION_PROPERTY_NAME)) { + minFailureStackVersion = checkProperties.get(MIN_FAILURE_STACK_VERSION_PROPERTY_NAME); + } + + if (!errorMessages.isEmpty()) { + prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " ")); + 
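// A worked illustration (property name from this check, stack names hypothetical): with min-failure-stack-version + // set to "HDP-2.3", an HDP-2.1 to HDP-2.2 upgrade is relaxed to WARNING below, because both the source and the + // target stack predate 2.3; an HDP-2.2 to HDP-2.3 upgrade still results in FAIL. +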
prerequisiteCheck.getFailedOn().add("HIVE"); + PrereqCheckStatus checkStatus = PrereqCheckStatus.FAIL; + if(minFailureStackVersion != null && !minFailureStackVersion.isEmpty()) { + String[] minStack = minFailureStackVersion.split("-"); + if (minStack.length == 2) { + String minStackName = minStack[0]; + String minStackVersion = minStack[1]; + if (minStackName.equals(request.getSourceStackId().getStackName())) { + RepositoryVersionEntity repositoryVersion = request.getTargetRepositoryVersion(); + StackId targetStackId = repositoryVersion.getStackId(); + if (VersionUtils.compareVersions(request.getSourceStackId().getStackVersion(), minStackVersion) < 0 + && VersionUtils.compareVersions(targetStackId.getStackVersion(), minStackVersion) < 0 + && VersionUtils.compareVersions(request.getSourceStackId().getStackVersion(), targetStackId.getStackVersion()) < 0) { + checkStatus = PrereqCheckStatus.WARNING; + } + } + } + } + prerequisiteCheck.setStatus(checkStatus); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheck.java new file mode 100644 index 00000000000..e876696e553 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheck.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.ServiceComponentNotFoundException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.ServiceComponentHost; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link HiveMultipleMetastoreCheck} checks that there are at least 2 Hive + * Metastore instances in the cluster. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.MULTIPLE_COMPONENT_WARNING, order = 20.1f) +public class HiveMultipleMetastoreCheck extends AbstractCheckDescriptor { + + /** + * Constructor. 
+ */ + public HiveMultipleMetastoreCheck() { + super(CheckDescription.SERVICES_HIVE_MULTIPLE_METASTORES); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("HIVE"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + + try { + Service hive = cluster.getService("HIVE"); + ServiceComponent metastore = hive.getServiceComponent("HIVE_METASTORE"); + Map<String, ServiceComponentHost> metastores = metastore.getServiceComponentHosts(); + + if (metastores.size() < 2) { + prerequisiteCheck.getFailedOn().add("HIVE"); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } catch (ServiceComponentNotFoundException scnfe) { + prerequisiteCheck.getFailedOn().add("HIVE"); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveNotRollingWarning.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveNotRollingWarning.java new file mode 100644 index 00000000000..8e862c83a77 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/HiveNotRollingWarning.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link HiveNotRollingWarning} check is used to see if Hive is installed and if the + * upgrade type is {@link UpgradeType#ROLLING}. If so, then a + * {@link PrereqCheckStatus#WARNING} is produced which will let the operator + * know that Hive does not support rolling upgrades. + *

+ * In actuality, it does; however, in order to support zero downtime, a new Hive + * server is spun up on a new port while the old server drains. Clients that are + * not using the ZK discovery service for Hive and instead connect directly via a + * URL will cease to function. For this reason, it has been determined that, at + * this point in time, Hive will not be upgraded in a rolling fashion. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING, required = UpgradeType.ROLLING) +public class HiveNotRollingWarning extends AbstractCheckDescriptor { + + /** + * Constructor. + */ + public HiveNotRollingWarning() { + super(CheckDescription.SERVICES_HIVE_ROLLING_WARNING); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("HIVE"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + prerequisiteCheck.getFailedOn().add("HIVE"); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/KafkaKerberosCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/KafkaKerberosCheck.java new file mode 100644 index 00000000000..724feab4797 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/KafkaKerberosCheck.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.SecurityType; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that the cluster is Kerberized while trying to upgrade Kafka. + * Shows a warning for a Kerberized cluster with the Kafka service, and nothing if + * the cluster is not Kerberized. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.KERBEROS, order = 1.0f) +public class KafkaKerberosCheck extends AbstractCheckDescriptor { + + private final String KAFKA_SERVICE = "KAFKA"; + + /** + * Constructor. 
+ */ + public KafkaKerberosCheck() { + super(CheckDescription.KAFKA_KERBEROS_CHECK); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet(KAFKA_SERVICE); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + + if (cluster.getSecurityType() == SecurityType.KERBEROS){ + prerequisiteCheck.getFailedOn().add(KAFKA_SERVICE); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheck.java new file mode 100644 index 00000000000..2015be3b9b2 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheck.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.commons.lang.StringUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link MapReduce2JobHistoryStatePreservingCheck} + * is used to check that the MR2 History server has state preserving mode enabled. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.CONFIGURATION_WARNING, order = 17.0f) +public class MapReduce2JobHistoryStatePreservingCheck extends AbstractCheckDescriptor { + + final static String MAPREDUCE2_JOBHISTORY_RECOVERY_ENABLE_KEY = + "mapreduce.jobhistory.recovery.enable"; + final static String MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_KEY = + "mapreduce.jobhistory.recovery.store.class"; + final static String MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY = + "mapreduce.jobhistory.recovery.store.leveldb.path"; + final static String YARN_TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH_KEY = + "yarn.timeline-service.leveldb-state-store.path"; + + /** + * Constructor. 
+ */ + public MapReduce2JobHistoryStatePreservingCheck() { + super(CheckDescription.SERVICES_MR2_JOBHISTORY_ST); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("MAPREDUCE2"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + List<String> errorMessages = new ArrayList<>(); + PrereqCheckStatus checkStatus = PrereqCheckStatus.FAIL; + + String enabled = + getProperty(request, "mapred-site", MAPREDUCE2_JOBHISTORY_RECOVERY_ENABLE_KEY); + String storeClass = + getProperty(request, "mapred-site", MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_KEY); + String storeLevelDbPath = + getProperty(request, "mapred-site", MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY); + + if (null == enabled || !Boolean.parseBoolean(enabled)) { + errorMessages.add(getFailReason(MAPREDUCE2_JOBHISTORY_RECOVERY_ENABLE_KEY, prerequisiteCheck, request)); + } + + if (StringUtils.isBlank(storeClass)) { + errorMessages.add(getFailReason(MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_KEY, prerequisiteCheck, + request)); + } + + if (StringUtils.isBlank(storeLevelDbPath)) { + errorMessages.add(getFailReason(MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY, prerequisiteCheck, + request)); + } + + if (!errorMessages.isEmpty()) { + prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, "\n")); + prerequisiteCheck.getFailedOn().add("MAPREDUCE2"); + prerequisiteCheck.setStatus(checkStatus); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerAuditDbCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerAuditDbCheck.java new file mode 100644 index 00000000000..e5f02577b4a --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerAuditDbCheck.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The Ranger service will no longer support Audit to Database after upgrading to the 2.5 stack. 
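+ * The check reads ranger.audit.source.type from ranger-admin-site and warns when it is still set to "db", + * since audits are expected to go to Solr after the upgrade.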
+ */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING) +public class RangerAuditDbCheck extends AbstractCheckDescriptor{ + + private static final Logger LOG = LoggerFactory.getLogger(RangerAuditDbCheck.class); + private static final String serviceName = "RANGER"; + + public RangerAuditDbCheck(){ + super(CheckDescription.RANGER_SERVICE_AUDIT_DB_CHECK); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet(serviceName); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + + String propertyValue = getProperty(request, "ranger-admin-site", "ranger.audit.source.type"); + + if (null != propertyValue && propertyValue.equalsIgnoreCase("db")) { + prerequisiteCheck.getFailedOn().add(serviceName); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerPasswordCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerPasswordCheck.java new file mode 100644 index 00000000000..c5eca750b7b --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerPasswordCheck.java @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.configuration.ComponentSSLConfiguration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.controller.internal.URLStreamProvider; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; +import com.google.gson.Gson; +import com.google.inject.Singleton; + +/** + * Used to make sure that the password in Ambari matches that for Ranger, in case the + * user had changed the password using the Ranger UI. 
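+ * Verification happens in two steps: the admin credentials from ranger-env are tried first, and only then is + * ranger_admin_username looked up in Ranger and its password verified the same way.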
+ */ +@Singleton +@UpgradeCheck( + group = UpgradeCheckGroup.CONFIGURATION_WARNING, + order = 23.0f, + required = { UpgradeType.ROLLING, UpgradeType.EXPRESS, UpgradeType.HOST_ORDERED }) +public class RangerPasswordCheck extends AbstractCheckDescriptor { + + private static final Logger LOG = LoggerFactory.getLogger(RangerPasswordCheck.class); + + static final String KEY_RANGER_PASSWORD_MISMATCH = "could_not_verify_password"; + static final String KEY_RANGER_COULD_NOT_ACCESS = "could_not_access"; + static final String KEY_RANGER_UNKNOWN_RESPONSE = "unknown_response"; + static final String KEY_RANGER_USERS_ELEMENT_MISSING = "missing_vxusers"; + static final String KEY_RANGER_OTHER_ISSUE = "invalid_response"; + static final String KEY_RANGER_CONFIG_MISSING = "missing_config"; + + /** + * Constructor. + */ + public RangerPasswordCheck() { + super(CheckDescription.SERVICES_RANGER_PASSWORD_VERIFY); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("RANGER"); + } + + @Override + public void perform(PrerequisiteCheck check, PrereqCheckRequest request) throws AmbariException { + // !!! ComponentSSLConfiguration is an old-school singleton which doesn't + // get initialized until after Guice is done - because this check is bound + // as a singleton via Guice, we can't initialize the stream provider in the + // constructor since the SSL configuration instance hasn't been initialized + URLStreamProvider streamProvider = new URLStreamProvider(2000, 2000, + ComponentSSLConfiguration.instance()); + + String rangerUrl = checkEmpty("admin-properties", "policymgr_external_url", check, request); + if (null == rangerUrl) { + // !!! check results already filled + return; + } + + String adminUsername = checkEmpty("ranger-env", "admin_username", check, request); + if (null == adminUsername) { + return; + } + + String adminPassword = checkEmpty("ranger-env", "admin_password", check, request); + if (null == adminPassword) { + return; + } + + String rangerAdminUsername = checkEmpty("ranger-env", "ranger_admin_username", check, request); + if (null == rangerAdminUsername) { + return; + } + + String rangerAdminPassword = checkEmpty("ranger-env", "ranger_admin_password", check, request); + if (null == rangerAdminPassword) { + return; + } + + if (rangerUrl.endsWith("/")) { + rangerUrl = rangerUrl.substring(0, rangerUrl.length()-1); + } + + String rangerAuthUrl = String.format("%s/%s", rangerUrl, + "service/public/api/repository/count"); + String rangerUserUrl = String.format("%s/%s", rangerUrl, + "service/xusers/users"); + + List<String> failReasons = new ArrayList<>(); + List<String> warnReasons = new ArrayList<>(); + + // !!! first, just try the service with the admin credentials + try { + int response = checkLogin(streamProvider, rangerAuthUrl, adminUsername, adminPassword); + + switch (response) { + case 401: { + String reason = getFailReason(KEY_RANGER_PASSWORD_MISMATCH, check, request); + failReasons.add(String.format(reason, adminUsername)); + break; + } + case 200: { + break; + } + default: { + String reason = getFailReason(KEY_RANGER_UNKNOWN_RESPONSE, check, request); + warnReasons.add(String.format(reason, adminUsername, response, rangerAuthUrl)); + break; + } + } + + } catch (IOException e) { + LOG.warn("Could not access the url {}. Message: {}", rangerAuthUrl, e.getMessage()); + LOG.debug("Could not access the url {}. 
Message: {}", rangerAuthUrl, e.getMessage()); + + String reason = getFailReason(KEY_RANGER_COULD_NOT_ACCESS, check, request); + warnReasons.add(String.format(reason, adminUsername, rangerAuthUrl, e.getMessage())); + } + + // !!! shortcut when something happened with the admin user + if (!failReasons.isEmpty()) { + check.setFailReason(StringUtils.join(failReasons, '\n')); + check.getFailedOn().add("RANGER"); + check.setStatus(PrereqCheckStatus.FAIL); + return; + } else if (!warnReasons.isEmpty()) { + check.setFailReason(StringUtils.join(warnReasons, '\n')); + check.getFailedOn().add("RANGER"); + check.setStatus(PrereqCheckStatus.WARNING); + return; + } + + // !!! Check for the user, capture exceptions as a warning. + boolean hasUser = checkRangerUser(streamProvider, rangerUserUrl, adminUsername, adminPassword, + rangerAdminUsername, check, request, warnReasons); + + if (hasUser) { + // !!! try credentials for specific user + try { + int response = checkLogin(streamProvider, rangerAuthUrl, rangerAdminUsername, + rangerAdminPassword); + + switch (response) { + case 401: { + String reason = getFailReason(KEY_RANGER_PASSWORD_MISMATCH, check, request); + failReasons.add(String.format(reason, rangerAdminUsername)); + break; + } + case 200: { + break; + } + default: { + String reason = getFailReason(KEY_RANGER_UNKNOWN_RESPONSE, check, request); + warnReasons.add(String.format(reason, rangerAdminUsername, response, rangerAuthUrl)); + break; + } + } + + } catch (IOException e) { + LOG.warn("Could not access the url {}. Message: {}", rangerAuthUrl, e.getMessage()); + LOG.debug("Could not access the url {}. Message: {}", rangerAuthUrl, e.getMessage(), e); + + String reason = getFailReason(KEY_RANGER_COULD_NOT_ACCESS, check, request); + warnReasons.add(String.format(reason, rangerAdminUsername, rangerAuthUrl, e.getMessage())); + } + } + + if (!failReasons.isEmpty()) { + check.setFailReason(StringUtils.join(failReasons, '\n')); + check.getFailedOn().add("RANGER"); + check.setStatus(PrereqCheckStatus.FAIL); + } else if (!warnReasons.isEmpty()) { + check.setFailReason(StringUtils.join(warnReasons, '\n')); + check.getFailedOn().add("RANGER"); + check.setStatus(PrereqCheckStatus.WARNING); + } else { + check.setStatus(PrereqCheckStatus.PASS); + } + + } + + /** + * Checks the credentials. From the Ranger team, bad credentials result in a + * successful call, but the Ranger admin server will redirect to the home + * page. They recommend parsing the result. If it parses, the credentials are + * good, otherwise consider the user as unverified. + * + * @param streamProvider + * the stream provider to use when making requests + * @param url + * the url to check + * @param username + * the user to check + * @param password + * the password to check + * @return the http response code + * @throws IOException + * if there was an error reading the response + */ + private int checkLogin(URLStreamProvider streamProvider, String url, String username, + String password) throws IOException { + + Map> headers = getHeaders(username, password); + + HttpURLConnection conn = streamProvider.processURL(url, "GET", (InputStream) null, headers); + + int result = conn.getResponseCode(); + + // !!! 
see javadoc + if (result == 200) { + Gson gson = new Gson(); + try { + gson.fromJson(new InputStreamReader(conn.getInputStream()), Object.class); + } catch (Exception e) { + result = 401; + } + } + + return result; + } + + /** + * @param streamProvider + * the stream provider to use when making requests + * @param rangerUserUrl + * the url to use when looking for the user + * @param username + * the username to use when loading the url + * @param password + * the password for the user url + * @param userToSearch + * the user to look for + * @param check + * the check instance for loading failure reasons + * @param request + * the request instance for loading failure reasons + * @param warnReasons + * the list of warn reasons to fill + * @return {@code true} if the user was found + */ + private boolean checkRangerUser(URLStreamProvider streamProvider, String rangerUserUrl, + String username, String password, String userToSearch, PrerequisiteCheck check, + PrereqCheckRequest request, List<String> warnReasons) throws AmbariException { + + String url = String.format("%s?name=%s", rangerUserUrl, userToSearch); + + Map<String, List<String>> headers = getHeaders(username, password); + + try { + HttpURLConnection conn = streamProvider.processURL(url, "GET", (InputStream) null, headers); + + int result = conn.getResponseCode(); + + if (result == 200) { + + Gson gson = new Gson(); + Object o = gson.fromJson(new InputStreamReader(conn.getInputStream()), Object.class); + + Map<String, Object> map = (Map<String, Object>) o; + + if (!map.containsKey("vXUsers")) { + String reason = getFailReason(KEY_RANGER_USERS_ELEMENT_MISSING, check, request); + warnReasons.add(String.format(reason, url)); + + return false; + } + + @SuppressWarnings("unchecked") + List<Map<String, Object>> list = (List<Map<String, Object>>) map.get("vXUsers"); + + for (Map<String, Object> listMap : list) { + if (listMap.containsKey("name") && listMap.get("name").equals(userToSearch)) { + return true; + } + } + } + } catch (IOException e) { + LOG.warn("Could not determine user {}. Error is {}", userToSearch, e.getMessage()); + LOG.debug("Could not determine user {}. Error is {}", userToSearch, e.getMessage(), e); + + String reason = getFailReason(KEY_RANGER_COULD_NOT_ACCESS, check, request); + warnReasons.add(String.format(reason, username, url, e.getMessage())); + + } catch (Exception e) { + LOG.warn("Could not determine user {}. Error is {}", userToSearch, e.getMessage()); + LOG.debug("Could not determine user {}. Error is {}", userToSearch, e.getMessage(), e); + + String reason = getFailReason(KEY_RANGER_OTHER_ISSUE, check, request); + warnReasons.add(String.format(reason, e.getMessage(), url)); + } + + return false; + } + + /** + * Generates a list of headers, including {@code Basic} authentication + * @param username the username + * @param password the password + * @return the map of headers + */ + private Map<String, List<String>> getHeaders(String username, String password) { + Map<String, List<String>> headers = new HashMap<>(); + + String base64 = Base64.encodeBase64String( + String.format("%s:%s", username, password).getBytes(Charset.forName("UTF8"))); + + headers.put("Content-Type", Arrays.asList("application/json")); + headers.put("Accept", Arrays.asList("application/json")); + headers.put("Authorization", Arrays.asList(String.format("Basic %s", base64))); + + return headers; + } + + /** + * Finds the property value. If not found, then the failure reason for the check + * is filled in and processing should not continue. 
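+ * For instance, {@code checkEmpty("ranger-env", "admin_username", check, request)} returns {@code null} and + * downgrades the check to WARNING when that property is absent.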
+ * + * @param type the type of property to find + * @param key the key in configs matching the type + * @param check the check for loading failure reasons + * @param request the request for loading failure reasons + * @return the property value, or {@code null} if the property doesn't exist + * @throws AmbariException + */ + private String checkEmpty(String type, String key, PrerequisiteCheck check, + PrereqCheckRequest request) throws AmbariException { + + String value = getProperty(request, type, key); + if (null == value) { + String reason = getFailReason(KEY_RANGER_CONFIG_MISSING, check, request); + reason = String.format(reason, type, key); + check.setFailReason(reason); + check.getFailedOn().add("RANGER"); + check.setStatus(PrereqCheckStatus.WARNING); + } + return value; + } + + +} \ No newline at end of file diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java new file mode 100644 index 00000000000..540fd3e7fd6 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + + +/** + * This service check applies mainly to 2.6 stacks and encourages users to move + * the certificate, keystore and truststore from the default conf dir to an + * external directory that is left untouched during rolling or express + * upgrades and downgrades. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING) +public class RangerSSLConfigCheck extends AbstractCheckDescriptor { + + private static final Logger LOG = LoggerFactory.getLogger(RangerSSLConfigCheck.class); + private static final String serviceName = "RANGER"; + + + /** + * Constructor. + */ + public RangerSSLConfigCheck() { + super(CheckDescription.RANGER_SSL_CONFIG_CHECK); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet(serviceName); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + String isRangerHTTPEnabled = getProperty(request, "ranger-admin-site", "ranger.service.http.enabled"); + String isRangerSSLEnabled = getProperty(request, "ranger-admin-site", "ranger.service.https.attrib.ssl.enabled"); + String rangerSSLKeystoreFile = getProperty(request, "ranger-admin-site", "ranger.https.attrib.keystore.file"); + + if (("false").equalsIgnoreCase(isRangerHTTPEnabled) && ("true").equalsIgnoreCase(isRangerSSLEnabled) && rangerSSLKeystoreFile.contains("/etc/ranger/admin/conf")) { + LOG.info("Ranger is SSL enabled, need to show Configuration changes warning before upgrade proceeds."); + prerequisiteCheck.getFailedOn().add(serviceName); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } else { + LOG.info("Ranger is not SSL enabled, no need to show Configuration changes warning before upgrade proceeds."); + } + + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java new file mode 100644 index 00000000000..349b2609516 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.ServiceComponentNotFoundException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.dao.HostComponentStateDAO; +import org.apache.ambari.server.orm.entities.HostComponentStateEntity; +import org.apache.ambari.server.stack.MasterHostResolver; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Inject; +import com.google.inject.Singleton; + +/** + * Checks that the Secondary NameNode is not present on any of the hosts. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.0f) +public class SecondaryNamenodeDeletedCheck extends AbstractCheckDescriptor { + private static final String HDFS_SERVICE_NAME = MasterHostResolver.Service.HDFS.name(); + + @Inject + HostComponentStateDAO hostComponentStateDao; + /** + * Constructor. + */ + public SecondaryNamenodeDeletedCheck() { + super(CheckDescription.SECONDARY_NAMENODE_MUST_BE_DELETED); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet(HDFS_SERVICE_NAME); + } + + /** + * {@inheritDoc} + */ + @Override + public List<CheckQualification> getQualifications() { + return Arrays.asList( + new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA)); + } + + // TODO AMBARI-12698, there are 2 ways to filter the prechecks. + // 1. Explicitly mention them in each upgrade pack, which is more flexible, but requires adding the name of checks + // to perform in each upgrade pack. + // 2. Make each upgrade check class call a function before perform() that will determine if the check is appropriate + // given the type of upgrade. The PrereqCheckRequest object has a field for the type of upgrade. + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + Set<String> hosts = new HashSet<>(); + final String SECONDARY_NAMENODE = "SECONDARY_NAMENODE"; + + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + try { + ServiceComponent serviceComponent = cluster.getService(HDFS_SERVICE_NAME).getServiceComponent(SECONDARY_NAMENODE); + if (serviceComponent != null) { + hosts = serviceComponent.getServiceComponentHosts().keySet(); + } + } catch (ServiceComponentNotFoundException err) { + // This exception can be ignored if the component doesn't exist because it is a best-attempt at finding it. 
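+ // If this lookup comes up empty, the DAO-based scan below still catches SECONDARY_NAMENODE entries + // that are recorded only in the host component state table.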
+ } + + // Try another method to find references to SECONDARY_NAMENODE + if (hosts.isEmpty()) { + List<HostComponentStateEntity> allHostComponents = hostComponentStateDao.findAll(); + for(HostComponentStateEntity hc : allHostComponents) { + Service s = cluster.getService(hc.getServiceId()); + if (s.getServiceType().equalsIgnoreCase(HDFS_SERVICE_NAME) && hc.getComponentName().equalsIgnoreCase(SECONDARY_NAMENODE)) { + hosts.add(hc.getHostName()); + } + } + } + + if (!hosts.isEmpty()) { + String foundHost = hosts.toArray(new String[hosts.size()])[0]; + prerequisiteCheck.getFailedOn().add(HDFS_SERVICE_NAME); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + String failReason = getFailReason(prerequisiteCheck, request); + prerequisiteCheck.setFailReason(String.format(failReason, foundHost)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java new file mode 100644 index 00000000000..e24e669863a --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.apache.commons.lang.StringUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that MR jobs reference hadoop libraries from the distributed cache. 
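+ * <p> + * With the default protocol pattern {@code ^([^:]*dfs|wasb|ecs):.*}, an illustrative framework path such as + * {@code hdfs://ns1/mapred/framework/hadoop-mr.tar.gz} satisfies the check, while {@code file:///local/path} + * does not unless {@code fs.defaultFS} itself matches the pattern.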
+ */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 17.1f) +public class ServicesMapReduceDistributedCacheCheck extends AbstractCheckDescriptor { + + static final String KEY_APP_CLASSPATH = "app_classpath"; + static final String KEY_FRAMEWORK_PATH = "framework_path"; + static final String KEY_NOT_DFS = "not_dfs"; + static final String DFS_PROTOCOLS_REGEX_PROPERTY_NAME = "dfs-protocols-regex"; + static final String DFS_PROTOCOLS_REGEX_DEFAULT = "^([^:]*dfs|wasb|ecs):.*"; + + /** + * {@inheritDoc} + */ + @Override + public Set getApplicableServices() { + return Sets.newHashSet("YARN"); + } + + /** + * {@inheritDoc} + */ + @Override + public List getQualifications() { + return Arrays.asList( + new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA)); + } + + /** + * Constructor. + */ + public ServicesMapReduceDistributedCacheCheck() { + super(CheckDescription.SERVICES_MR_DISTRIBUTED_CACHE); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + String dfsProtocolsRegex = DFS_PROTOCOLS_REGEX_DEFAULT; + PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig(); + Map checkProperties = null; + if(prerequisiteCheckConfig != null) { + checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName()); + } + if(checkProperties != null && checkProperties.containsKey(DFS_PROTOCOLS_REGEX_PROPERTY_NAME)) { + dfsProtocolsRegex = checkProperties.get(DFS_PROTOCOLS_REGEX_PROPERTY_NAME); + } + + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + final String mrConfigType = "mapred-site"; + final String coreSiteConfigType = "core-site"; + final Map desiredConfigs = cluster.getDesiredConfigs(); + + final DesiredConfig mrDesiredConfig = desiredConfigs.get(mrConfigType); + final DesiredConfig coreSiteDesiredConfig = desiredConfigs.get(coreSiteConfigType); + final Config mrConfig = cluster.getConfig(mrConfigType, mrDesiredConfig.getTag()); + final Config coreSiteConfig = cluster.getConfig(coreSiteConfigType, coreSiteDesiredConfig.getTag()); + final String applicationClasspath = mrConfig.getProperties().get("mapreduce.application.classpath"); + final String frameworkPath = mrConfig.getProperties().get("mapreduce.application.framework.path"); + final String defaultFS = coreSiteConfig.getProperties().get("fs.defaultFS"); + + List errorMessages = new ArrayList<>(); + if (applicationClasspath == null || applicationClasspath.isEmpty()) { + errorMessages.add(getFailReason(KEY_APP_CLASSPATH, prerequisiteCheck, request)); + } + + if (frameworkPath == null || frameworkPath.isEmpty()) { + errorMessages.add(getFailReason(KEY_FRAMEWORK_PATH, prerequisiteCheck, request)); + } + + if (!errorMessages.isEmpty()) { + prerequisiteCheck.getFailedOn().add("MAPREDUCE2"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " ")); + return; + } + + if (!frameworkPath.matches(dfsProtocolsRegex) && (defaultFS == null || !defaultFS.matches(dfsProtocolsRegex))) { + prerequisiteCheck.getFailedOn().add("MAPREDUCE2"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(getFailReason(KEY_NOT_DFS, prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java 
b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java new file mode 100644 index 00000000000..ecd88edde01 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that namenode high availability is enabled. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.1f) +public class ServicesNamenodeHighAvailabilityCheck extends AbstractCheckDescriptor { + + /** + * Constructor. + */ + public ServicesNamenodeHighAvailabilityCheck() { + super(CheckDescription.SERVICES_NAMENODE_HA); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("HDFS"); + } + + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + final String configType = "hdfs-site"; + final Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs(); + final DesiredConfig desiredConfig = desiredConfigs.get(configType); + final Config config = cluster.getConfig(configType, desiredConfig.getTag()); + if (!config.getProperties().containsKey("dfs.internal.nameservices")) { + prerequisiteCheck.getFailedOn().add("HDFS"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java new file mode 100644 index 00000000000..4d9e7d72aba --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that the HDFS truncate operation (dfs.allow.truncate) is not enabled. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.2f) +public class ServicesNamenodeTruncateCheck extends AbstractCheckDescriptor { + + /** + * Constructor. + */ + public ServicesNamenodeTruncateCheck() { + super(CheckDescription.SERVICES_NAMENODE_TRUNCATE); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("HDFS"); + } + + /** + * {@inheritDoc} + */ + @Override + public List<CheckQualification> getQualifications() { + return Arrays.asList( + new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA)); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + Config config = cluster.getDesiredConfigByType("hdfs-site"); + + String truncateEnabled = config.getProperties().get("dfs.allow.truncate"); + + if (Boolean.valueOf(truncateEnabled)) { + prerequisiteCheck.getFailedOn().add("HDFS"); + PrereqCheckStatus checkStatus = PrereqCheckStatus.FAIL; + prerequisiteCheck.setStatus(checkStatus); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java new file mode 100644 index 00000000000..8331ebf9540 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.apache.commons.lang.StringUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that Tez jobs reference hadoop libraries from the distributed cache. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 21.0f) +public class ServicesTezDistributedCacheCheck extends AbstractCheckDescriptor { + + static final String KEY_LIB_URI_MISSING = "tez_lib_uri_missing"; + static final String KEY_USE_HADOOP_LIBS = "tez_use_hadoop_libs"; + static final String KEY_LIB_NOT_DFS = "lib_not_dfs"; + static final String KEY_LIB_NOT_TARGZ = "lib_not_targz"; + static final String KEY_USE_HADOOP_LIBS_FALSE = "tez_use_hadoop_libs_false"; + static final String DFS_PROTOCOLS_REGEX_PROPERTY_NAME = "dfs-protocols-regex"; + static final String DFS_PROTOCOLS_REGEX_DEFAULT = "^([^:]*dfs|wasb|ecs):.*"; + + /** + * {@inheritDoc} + */ + @Override + public Set getApplicableServices() { + return Sets.newHashSet("TEZ"); + } + + /** + * {@inheritDoc} + */ + @Override + public List getQualifications() { + return Arrays.asList( + new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA)); + } + + /** + * Constructor. 
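Reviewer note ahead of the perform() body that follows: the Tez check reduces to three conditions on tez-site, with one escape hatch (a tez.lib.uris value that does not look like a DFS path is still accepted when core-site's fs.defaultFS matches the protocol pattern). A condensed sketch under assumed passing values; the property literals below are hypothetical examples, not taken from the patch:

    // Hypothetical tez-site values that would satisfy the check.
    String libUris = "hdfs://mycluster/apps/tez/tez-0.7.0.tar.gz"; // tez.lib.uris
    String useHadoopLibs = "false";                                // tez.use.cluster.hadoop-libs
    String dfsProtocolsRegex = "^([^:]*dfs|wasb|ecs):.*";          // default, overridable per pack

    boolean passes = libUris.matches(dfsProtocolsRegex) // tarball lives on a DFS
        && libUris.contains("tar.gz")                   // and is packaged as tar.gz
        && !Boolean.parseBoolean(useHadoopLibs);        // and cluster libs are not reused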
+ */ + public ServicesTezDistributedCacheCheck() { + super(CheckDescription.SERVICES_TEZ_DISTRIBUTED_CACHE); + } + + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + String dfsProtocolsRegex = DFS_PROTOCOLS_REGEX_DEFAULT; + PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig(); + Map checkProperties = null; + if(prerequisiteCheckConfig != null) { + checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName()); + } + if(checkProperties != null && checkProperties.containsKey(DFS_PROTOCOLS_REGEX_PROPERTY_NAME)) { + dfsProtocolsRegex = checkProperties.get(DFS_PROTOCOLS_REGEX_PROPERTY_NAME); + } + + final String clusterName = request.getClusterName(); + final Cluster cluster = clustersProvider.get().getCluster(clusterName); + final String tezConfigType = "tez-site"; + final String coreSiteConfigType = "core-site"; + final Map desiredConfigs = cluster.getDesiredConfigs(); + + final DesiredConfig tezDesiredConfig = desiredConfigs.get(tezConfigType); + final Config tezConfig = cluster.getConfig(tezConfigType, tezDesiredConfig.getTag()); + final DesiredConfig coreSiteDesiredConfig = desiredConfigs.get(coreSiteConfigType); + final Config coreSiteConfig = cluster.getConfig(coreSiteConfigType, coreSiteDesiredConfig.getTag()); + final String libUris = tezConfig.getProperties().get("tez.lib.uris"); + final String useHadoopLibs = tezConfig.getProperties().get("tez.use.cluster.hadoop-libs"); + final String defaultFS = coreSiteConfig.getProperties().get("fs.defaultFS"); + + List errorMessages = new ArrayList<>(); + if (libUris == null || libUris.isEmpty()) { + errorMessages.add(getFailReason(KEY_LIB_URI_MISSING, prerequisiteCheck, request)); + } + + if (useHadoopLibs == null || useHadoopLibs.isEmpty()) { + errorMessages.add(getFailReason(KEY_USE_HADOOP_LIBS, prerequisiteCheck, request)); + } + + if (!errorMessages.isEmpty()) { + prerequisiteCheck.getFailedOn().add("TEZ"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " ")); + return; + } + + if (!libUris.matches(dfsProtocolsRegex) && (defaultFS == null || !defaultFS.matches(dfsProtocolsRegex))) { + errorMessages.add(getFailReason(KEY_LIB_NOT_DFS, prerequisiteCheck, request)); + } + + if (!libUris.contains("tar.gz")) { + errorMessages.add(getFailReason(KEY_LIB_NOT_TARGZ, prerequisiteCheck, request)); + } + + if (Boolean.parseBoolean(useHadoopLibs)) { + errorMessages.add(getFailReason(KEY_USE_HADOOP_LIBS_FALSE, prerequisiteCheck, request)); + } + + if (!errorMessages.isEmpty()) { + prerequisiteCheck.getFailedOn().add("TEZ"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " ")); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java new file mode 100644 index 00000000000..0b102a99c8a --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.commons.lang.BooleanUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * Checks that YARN has work-preserving restart enabled. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.DEFAULT, order = 17.1f) +public class ServicesYarnWorkPreservingCheck extends AbstractCheckDescriptor { + + /** + * Constructor. + */ + public ServicesYarnWorkPreservingCheck() { + super(CheckDescription.SERVICES_YARN_WP); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("YARN"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + String propertyValue = getProperty(request, "yarn-site", + "yarn.resourcemanager.work-preserving-recovery.enabled"); + + if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) { + prerequisiteCheck.getFailedOn().add("YARN"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java new file mode 100644 index 00000000000..067cd8043a1 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
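Reviewer note on config access in these checks: ServicesYarnWorkPreservingCheck above goes through the getProperty(request, type, key) helper, while the NameNode checks resolve a Config by desired tag. A sketch of what the helper roughly expands to, inferred from how the other checks in this patch read configs rather than quoted from AbstractCheckDescriptor, and assuming cluster has already been resolved from the request:

    // Long-hand equivalent of getProperty(request, "yarn-site",
    //   "yarn.resourcemanager.work-preserving-recovery.enabled"),
    // using the same desired-config plumbing as ServicesNamenodeHighAvailabilityCheck.
    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
    DesiredConfig desiredConfig = desiredConfigs.get("yarn-site");
    Config config = cluster.getConfig("yarn-site", desiredConfig.getTag());
    String propertyValue = config.getProperties()
        .get("yarn.resourcemanager.work-preserving-recovery.enabled");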
+ */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link StormShutdownWarning} check sees if Storm is installed and if the + * upgrade type is {@link UpgradeType#ROLLING}. If so, then a + * {@link PrereqCheckStatus#WARNING} is produced which will let the operator + * know that Storm cannot be upgraded in a rolling fashion on certain versions + * of the HDP stack. + *

+ * The upgrade packs must include this check where it is applicable. It contains + * no logic for determining stack versions and only checks for the presence of + * Storm and the type of upgrade. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING, required = UpgradeType.ROLLING) +public class StormShutdownWarning extends AbstractCheckDescriptor { + + /** + * Constructor. + */ + public StormShutdownWarning() { + super(CheckDescription.SERVICES_STORM_ROLLING_WARNING); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("STORM"); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + prerequisiteCheck.getFailedOn().add("STORM"); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java new file mode 100644 index 00000000000..e0d3df7fc16 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.commons.lang.BooleanUtils; + +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link YarnRMHighAvailabilityCheck} checks that YARN has HA mode enabled + * for the ResourceManager. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.MULTIPLE_COMPONENT_WARNING, order = 17.2f) +public class YarnRMHighAvailabilityCheck extends AbstractCheckDescriptor { + + /** + * Constructor.
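Reviewer aid for the perform() bodies around here: the YARN checks parse flags with commons-lang BooleanUtils.toBoolean, which is laxer than the JDK parsing ServicesNamenodeTruncateCheck uses, so operator-supplied values like "yes" or "on" count as enabled for the YARN checks but would not for the truncate check. A standalone comparison, assuming org.apache.commons.lang.BooleanUtils on the classpath as in the surrounding classes:

    BooleanUtils.toBoolean("true"); // true
    BooleanUtils.toBoolean("on");   // true -- "true"/"on"/"yes", case-insensitive
    Boolean.parseBoolean("on");     // false -- the JDK accepts only "true" (any case)
    Boolean.valueOf("TRUE");        // true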
+ */ + public YarnRMHighAvailabilityCheck() { + super(CheckDescription.SERVICES_YARN_RM_HA); + } + + /** + * {@inheritDoc} + */ + @Override + public Set getApplicableServices() { + return Sets.newHashSet("YARN"); + } + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + // pretty weak sauce here; probably should do a bit more, like query JMX to + // see that there is at least 1 RM active and 1 in standby + String propertyValue = getProperty(request, "yarn-site", "yarn.resourcemanager.ha.enabled"); + + if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) { + prerequisiteCheck.getFailedOn().add("YARN"); + prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java new file mode 100644 index 00000000000..27d4ace8847 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.apache.ambari.server.utils.VersionUtils; +import org.apache.commons.lang.BooleanUtils; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.inject.Singleton; + +/** + * The {@link YarnTimelineServerStatePreservingCheck} is used to check that the + * YARN Timeline server has state preserving mode enabled. This value is only + * present in HDP 2.2.4.2+. + */ +@Singleton +@UpgradeCheck(group = UpgradeCheckGroup.DEFAULT, order = 17.3f) +public class YarnTimelineServerStatePreservingCheck extends AbstractCheckDescriptor { + + private final static String YARN_TIMELINE_STATE_RECOVERY_ENABLED_KEY = "yarn.timeline-service.recovery.enabled"; + private final static String MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME = "min-applicable-stack-version"; + + /** + * Constructor. 
+ */ + public YarnTimelineServerStatePreservingCheck() { + super(CheckDescription.SERVICES_YARN_TIMELINE_ST); + } + + /** + * {@inheritDoc} + */ + @Override + public Set<String> getApplicableServices() { + return Sets.newHashSet("YARN"); + } + + /** + * {@inheritDoc} + */ + @Override + public List<CheckQualification> getQualifications() { + return Lists.newArrayList(new YarnTimelineServerMinVersionQualification()); + } + + /** + * {@inheritDoc} + */ + @Override + public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException { + String propertyValue = getProperty(request, "yarn-site", + YARN_TIMELINE_STATE_RECOVERY_ENABLED_KEY); + + if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) { + prerequisiteCheck.getFailedOn().add("YARN"); + prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL); + prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request)); + } + } + + /** + * The {@link YarnTimelineServerMinVersionQualification} is used to determine + * if the ATS component needs to have the + * {@value #MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME} set. + */ + private class YarnTimelineServerMinVersionQualification implements CheckQualification { + + /** + * {@inheritDoc} + */ + @Override + public boolean isApplicable(PrereqCheckRequest request) throws AmbariException { + final Cluster cluster = clustersProvider.get().getCluster(request.getClusterName()); + + String minApplicableStackVersion = null; + PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig(); + Map<String, String> checkProperties = null; + if (prerequisiteCheckConfig != null) { + checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName()); + } + + if (checkProperties != null && checkProperties.containsKey(MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME)) { + minApplicableStackVersion = checkProperties.get(MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME); + } + + // Due to the introduction of YARN Timeline state recovery only from certain + // stack-versions onwards, this check is not applicable to earlier versions + // of the stack. + // Applicable only if min-applicable-stack-version config property is not defined, or + // version equals or exceeds the configured version.
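+ // Worked example (values hypothetical): with min-applicable-stack-version set + // to "HDP-2.2.4.2" and YARN's desired repository version at HDP 2.3.0.0, the + // split below yields minStackName = "HDP" and minStackVersion = "2.2.4.2"; + // VersionUtils.compareVersions("2.3.0.0", "2.2.4.2") >= 0, so the + // qualification returns true and the check runs.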
+ if(minApplicableStackVersion != null && !minApplicableStackVersion.isEmpty()) { + String[] minStack = minApplicableStackVersion.split("-"); + if(minStack.length == 2) { + String minStackName = minStack[0]; + String minStackVersion = minStack[1]; + Service yarnService = cluster.getService("YARN"); + String stackName = yarnService.getDesiredStackId().getStackName(); + if (minStackName.equals(stackName)) { + String currentRepositoryVersion = yarnService.getDesiredRepositoryVersion().getVersion(); + return VersionUtils.compareVersions(currentRepositoryVersion, minStackVersion) >= 0; + } + } + } + + return true; + + } + } +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java index 91189aff7dd..5334b7c8390 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java @@ -23,14 +23,11 @@ import java.util.List; import java.util.Map; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.actionmanager.TargetHostType; import org.apache.ambari.server.agent.ExecutionCommand; import org.apache.ambari.server.controller.internal.RequestOperationLevel; import org.apache.ambari.server.controller.internal.RequestResourceFilter; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.state.Mpack; /** * The context required to create tasks and stages for a custom action @@ -49,7 +46,6 @@ public class ActionExecutionContext { private boolean hostsInMaintenanceModeExcluded = true; private boolean allowRetry = false; private RepositoryVersionEntity repositoryVersion; - private Mpack mpack = null; private List m_visitors = new ArrayList<>(); @@ -194,8 +190,6 @@ public void setAutoSkipFailures(boolean autoSkipFailures) { * * @return */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public RepositoryVersionEntity getRepositoryVersion() { return repositoryVersion; } @@ -208,30 +202,10 @@ public RepositoryVersionEntity getRepositoryVersion() { * @param stackId * the stackId to use for stack-based properties on the command. */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) { this.repositoryVersion = repositoryVersion; } - /** - * Sets the management pack for this command. This can be used to version and - * stack information. - * - * @param mpack - */ - public void setMpack(Mpack mpack) { - this.mpack = mpack; - } - - /** - * Gets the management pack associated with this command. This can be used for - * version and stack information. - */ - public Mpack getMpack() { - return mpack; - } - /** * Adds a command visitor that will be invoked after a command is created. Provides access * to the command. 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java index 56e6968b90f..a46e2c4d4dd 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java @@ -52,6 +52,7 @@ import org.apache.ambari.server.state.ServiceComponentHost; import org.apache.ambari.server.state.ServiceInfo; import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent; import org.apache.ambari.server.utils.SecretReference; import org.apache.ambari.server.utils.StageUtils; @@ -91,6 +92,10 @@ public class AmbariActionExecutionHelper { @Inject private Configuration configs; + @Inject + private RepositoryVersionHelper repoVersionHelper; + + /** * Validates the request to execute an action. * @param actionRequest @@ -466,6 +471,15 @@ public boolean shouldHostBeRemoved(final String hostname) hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue()); } + if (StringUtils.isNotBlank(serviceName)) { + Service service = cluster.getService(serviceName); + repoVersionHelper.addRepoInfoToHostLevelParams(cluster, actionContext, service.getDesiredRepositoryVersion(), + hostLevelParams, hostName); + } else { + repoVersionHelper.addRepoInfoToHostLevelParams(cluster, actionContext, null, hostLevelParams, hostName); + } + + Map roleParams = execCmd.getRoleParams(); if (roleParams == null) { roleParams = new TreeMap<>(); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java index fdd20425395..9625f101269 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java @@ -33,6 +33,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME; @@ -70,6 +71,7 @@ import org.apache.ambari.server.controller.internal.RequestOperationLevel; import org.apache.ambari.server.controller.internal.RequestResourceFilter; import org.apache.ambari.server.controller.spi.Resource; +import org.apache.ambari.server.controller.spi.SystemException; import org.apache.ambari.server.metadata.ActionMetadata; import org.apache.ambari.server.orm.dao.HostRoleCommandDAO; import org.apache.ambari.server.state.Cluster; @@ -413,6 +415,12 @@ public boolean shouldHostBeRemoved(final String hostname) hostLevelParams.put(CUSTOM_COMMAND, commandName); + // Set parameters required for re-installing clients on restart + try { + 
hostLevelParams.put(REPO_INFO, repoVersionHelper.getRepoInfo(cluster, component, host)); + } catch (SystemException e) { + throw new AmbariException("", e); + } hostLevelParams.put(STACK_NAME, stackId.getStackName()); hostLevelParams.put(STACK_VERSION, stackId.getStackVersion()); @@ -1434,6 +1442,7 @@ Map createDefaultHostParams(Cluster cluster, StackId stackId) th hostLevelParams.put(MYSQL_JDBC_URL, managementController.getMysqljdbcUrl()); hostLevelParams.put(ORACLE_JDBC_URL, managementController.getOjdbcUrl()); hostLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName()); + hostLevelParams.putAll(managementController.getRcaParameters()); hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped()); hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled()); hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount()); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java index 0e5fb727911..9a1bb921215 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java @@ -509,19 +509,6 @@ RequestStatusResponse createAction(ExecuteActionRequest actionRequest, Map> findConfigurationTagsWithOverrides( Cluster cluster, String hostName) throws AmbariException; + /** + * Returns parameters for RCA database + * + * @return the map with parameters for RCA db + * + */ + Map getRcaParameters(); + /** * Get the Factory to create Request schedules * @return the request execution factory diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java index 6a45f187303..9f5acbfdf94 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java @@ -18,6 +18,10 @@ package org.apache.ambari.server.controller; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_DRIVER; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_PASSWORD; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_URL; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_USERNAME; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_RETRY_ENABLED; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT; @@ -27,8 +31,11 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MAX_DURATION_OF_RETRIES; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_VERSION; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT; import static 
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE; +import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED_KEY_JCE_REQUIRED; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST; @@ -83,6 +90,7 @@ import org.apache.ambari.server.actionmanager.Stage; import org.apache.ambari.server.actionmanager.StageFactory; import org.apache.ambari.server.agent.ExecutionCommand; +import org.apache.ambari.server.agent.ExecutionCommand.KeyNames; import org.apache.ambari.server.agent.rest.AgentResource; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.api.services.LoggingService; @@ -101,6 +109,7 @@ import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricCacheProvider; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException; +import org.apache.ambari.server.controller.spi.SystemException; import org.apache.ambari.server.customactions.ActionDefinition; import org.apache.ambari.server.events.publishers.AmbariEventPublisher; import org.apache.ambari.server.metadata.ActionMetadata; @@ -110,9 +119,7 @@ import org.apache.ambari.server.orm.dao.ClusterServiceDAO; import org.apache.ambari.server.orm.dao.ExtensionDAO; import org.apache.ambari.server.orm.dao.ExtensionLinkDAO; -import org.apache.ambari.server.orm.dao.HostComponentStateDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; -import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO; import org.apache.ambari.server.orm.dao.SettingDAO; import org.apache.ambari.server.orm.dao.StackDAO; import org.apache.ambari.server.orm.dao.WidgetDAO; @@ -120,13 +127,11 @@ import org.apache.ambari.server.orm.entities.ClusterEntity; import org.apache.ambari.server.orm.entities.ClusterServiceEntity; import org.apache.ambari.server.orm.entities.ExtensionLinkEntity; -import org.apache.ambari.server.orm.entities.HostComponentStateEntity; import org.apache.ambari.server.orm.entities.HostEntity; import org.apache.ambari.server.orm.entities.MpackEntity; import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; import org.apache.ambari.server.orm.entities.RepoOsEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; import org.apache.ambari.server.orm.entities.SettingEntity; import org.apache.ambari.server.orm.entities.StackEntity; import org.apache.ambari.server.orm.entities.WidgetEntity; @@ -183,7 +188,6 @@ import org.apache.ambari.server.state.ServiceComponentHostEvent; import org.apache.ambari.server.state.ServiceComponentHostFactory; import org.apache.ambari.server.state.ServiceFactory; -import org.apache.ambari.server.state.ServiceGroup; import org.apache.ambari.server.state.ServiceGroupFactory; import org.apache.ambari.server.state.ServiceInfo; import org.apache.ambari.server.state.StackId; @@ -331,13 +335,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle @Inject private ClusterServiceDAO clusterServiceDAO; - - @Inject - private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO; - - @Inject - private HostComponentStateDAO hostComponentStateDAO; - @Inject private ExtensionDAO extensionDAO; @Inject @@ 
-399,9 +396,6 @@ public AmbariManagementControllerImpl(ActionManager actionManager, masterHostname = InetAddress.getLocalHost().getCanonicalHostName(); maintenanceStateHelper = injector.getInstance(MaintenanceStateHelper.class); kerberosHelper = injector.getInstance(KerberosHelper.class); - hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class); - serviceComponentDesiredStateDAO = injector.getInstance(ServiceComponentDesiredStateDAO.class); - if(configs != null) { if (configs.getApiSSLAuthentication()) { @@ -630,13 +624,6 @@ public synchronized Set createHostComponents(Set createHostComponents(Set getHostComponents( } if (StringUtils.isBlank(serviceName)) { - LOG.error("Unable to find service for componentName : {}", request.getComponentName()); + LOG.error("Unable to find service for component {}", request.getComponentName()); throw new ServiceComponentHostNotFoundException( cluster.getClusterName(), null, request.getComponentName(), request.getHostname()); } @@ -1333,31 +1318,6 @@ private Set getHostComponents( Map desiredConfigs = cluster.getDesiredConfigs(); Map hosts = clusters.getHostsForCluster(cluster.getClusterName()); - /* - This is a core step in retrieving a given component instance in multi-host component instances world. - We fetch the 'HostComponentStateEntity' based on the 'host component Id' passed-in in the request. if it exists, - we use the service group Id, service Id, componentName and componentType to query the unique ServiceComponentEntity - associated with it. - */ - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = null; - HostComponentStateEntity hostComponentStateEntity = null; - if (request.getComponentId() != null) { - hostComponentStateEntity = hostComponentStateDAO.findById(request.getComponentId()); - if (hostComponentStateEntity == null) { - throw new AmbariException("Could not find Host Component resource for" - + " componentId = "+ request.getComponentId()); - } - serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(hostComponentStateEntity.getClusterId(), - hostComponentStateEntity.getServiceGroupId(), hostComponentStateEntity.getServiceId(), - hostComponentStateEntity.getComponentName(), hostComponentStateEntity.getComponentType()); - if (serviceComponentDesiredStateEntity == null) { - throw new AmbariException("Could not find Service Component resource for" - + " componentId = " + request.getComponentId() + ", serviceGroupId = " + hostComponentStateEntity.getServiceGroupId() - + ", serviceId = " + hostComponentStateEntity.getServiceId() + ", componentName = " + hostComponentStateEntity.getComponentName() - + ", componntType = " + hostComponentStateEntity.getComponentType()); - } - } - for (Service s : services) { // filter on component name if provided Set components = new HashSet<>(); @@ -1366,12 +1326,9 @@ private Set getHostComponents( } else { components.addAll(s.getServiceComponents().values()); } - for (ServiceComponent sc : components) { - if (serviceComponentDesiredStateEntity != null && - serviceComponentDesiredStateEntity.getId() != null && - sc.getId() != null) { - if (!sc.getId().equals(serviceComponentDesiredStateEntity.getId())) { + if (request.getComponentName() != null) { + if (!sc.getName().equals(request.getComponentName())) { continue; } } @@ -1431,7 +1388,7 @@ private Set getHostComponents( response.add(r); } catch (ServiceComponentHostNotFoundException e) { - if (request.getServiceName() == null || request.getComponentId() == null) { + if (request.getServiceName() 
== null || request.getComponentName() == null) { // Ignore the exception if either the service name or component name are not specified. // This is an artifact of how we get host_components and can happen in the case where // we get all host_components for a host, for example. @@ -1443,7 +1400,7 @@ private Set getHostComponents( // condition. LOG.debug("ServiceComponentHost not found ", e); throw new ServiceComponentHostNotFoundException(cluster.getClusterName(), - request.getServiceName(), request.getComponentId(), request.getHostname()); + request.getServiceName(), request.getComponentName(), request.getHostname()); } } } else { @@ -2437,6 +2394,7 @@ private void createHostAction(Cluster cluster, Map commandParamsInp, ServiceComponentHostEvent event, boolean skipFailure, + RepositoryVersionEntity repoVersion, boolean isUpgradeSuspended, DatabaseType databaseType, Map clusterDesiredConfigs @@ -2603,7 +2561,34 @@ private void createHostAction(Cluster cluster, } StageUtils.useAmbariJdkInCommandParams(commandParams, configs); + String repoInfo; + try { + repoInfo = repoVersionHelper.getRepoInfo(cluster, component, host); + } catch (SystemException e) { + throw new AmbariException("", e); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Sending repo information to agent, hostname={}, clusterName={}, stackInfo={}, repoInfo={}", + scHost.getHostName(), clusterName, stackId.getStackId(), repoInfo); + } + Map hostParams = new TreeMap<>(); + hostParams.put(REPO_INFO, repoInfo); + hostParams.putAll(getRcaParameters()); + + if (null != repoVersion) { + try { + VersionDefinitionXml xml = repoVersion.getRepositoryXml(); + if (null != xml && !StringUtils.isBlank(xml.getPackageVersion(osFamily))) { + hostParams.put(PACKAGE_VERSION, xml.getPackageVersion(osFamily)); + } + } catch (Exception e) { + throw new AmbariException(String.format("Could not load version xml from repo version %s", + repoVersion.getVersion()), e); + } + + hostParams.put(KeyNames.REPO_VERSION_ID, repoVersion.getId().toString()); + } List packages = getPackagesForStackServiceHost(ambariMetaInfo.getStack(stackId), serviceInfo, hostParams, osFamily); @@ -2695,11 +2680,11 @@ private void createHostAction(Cluster cluster, /** * Computes os-dependent packages for osSpecificMap. Does not take into - * account package dependencies for ANY_OS. Instead of this method you should - * use getPackagesForStackServiceHost() because it takes into account both - * os-dependent and os-independent lists of packages for stack service. - * - * @param hostParams + * account package dependencies for ANY_OS. Instead of this method + * you should use getPackagesForStackServiceHost() + * because it takes into account both os-dependent and os-independent lists + * of packages for stack service. + * @param hostParams may be modified (appended SERVICE_REPO_INFO) * @return a list of os-dependent packages for host */ protected OsSpecific populatePackagesInfo(Map osSpecificMap, Map hostParams, @@ -2710,12 +2695,18 @@ protected OsSpecific populatePackagesInfo(Map osSpecificMap, for (OsSpecific osSpecific : foundOSSpecifics) { hostOs.addPackages(osSpecific.getPackages()); } - } + //TODO this looks deprecated. 
Need to investigate if it's actually used + // Choose repo that is relevant for host + OsSpecific.Repo repos = hostOs.getRepo(); + if (repos != null) { + String serviceRepoInfo = gson.toJson(repos); + hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo); + } + } return hostOs; } - @Override public List getPackagesForStackServiceHost(StackInfo stackInfo, ServiceInfo serviceInfo, Map hostParams, String osFamily) { List packages = new ArrayList<>(); //add all packages for ANY_OS @@ -2958,11 +2949,11 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta Service service = cluster.getService(scHost.getServiceName()); ServiceComponent serviceComponent = service.getServiceComponent(compName); - StackId stackId = cluster.getServiceGroup(scHost.getServiceGroupId()).getStackId(); if (StringUtils.isBlank(stage.getHostParamsStage())) { + RepositoryVersionEntity repositoryVersion = serviceComponent.getDesiredRepositoryVersion(); stage.setHostParamsStage(StageUtils.getGson().toJson( - customCommandExecutionHelper.createDefaultHostParams(cluster, stackId))); + customCommandExecutionHelper.createDefaultHostParams(cluster, repositoryVersion.getStackId()))); } @@ -3032,6 +3023,7 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta } break; case STARTED: + StackId stackId = serviceComponent.getDesiredStackId(); ComponentInfo compInfo = ambariMetaInfo.getComponent( stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceType(), scHost.getServiceComponentName()); @@ -3178,8 +3170,10 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta } } else { // !!! can never be null + RepositoryVersionEntity repoVersion = serviceComponent.getDesiredRepositoryVersion(); + createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags, - roleCommand, requestParameters, event, skipFailure, isUpgradeSuspended, + roleCommand, requestParameters, event, skipFailure, repoVersion, isUpgradeSuspended, databaseType, clusterDesiredConfigs); } @@ -3195,12 +3189,9 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta calculateServiceComponentHostForServiceCheck(cluster, service); if (StringUtils.isBlank(stage.getHostParamsStage())) { - long serviceGroupId = componentForServiceCheck.getServiceGroupId(); - ServiceGroup serviceGroup = cluster.getServiceGroup(serviceGroupId); - StackId stackId = serviceGroup.getStackId(); - + RepositoryVersionEntity repositoryVersion = componentForServiceCheck.getServiceComponent().getDesiredRepositoryVersion(); stage.setHostParamsStage(StageUtils.getGson().toJson( - customCommandExecutionHelper.createDefaultHostParams(cluster, stackId))); + customCommandExecutionHelper.createDefaultHostParams(cluster, repositoryVersion.getStackId()))); } customCommandExecutionHelper.addServiceCheckAction(stage, componentForServiceCheck.getHostName(), smokeTestRole, @@ -3327,11 +3318,19 @@ public ExecutionCommand getExecutionCommand(Cluster cluster, configurationAttributes = new TreeMap<>(); + RepositoryVersionEntity repoVersion = null; + if (null != scHost.getServiceComponent().getDesiredRepositoryVersion()) { + repoVersion = scHost.getServiceComponent().getDesiredRepositoryVersion(); + } else { + Service service = cluster.getService(scHost.getServiceName()); + repoVersion = service.getDesiredRepositoryVersion(); + } + boolean isUpgradeSuspended = cluster.isUpgradeSuspended(); DatabaseType databaseType = configs.getDatabaseType(); Map clusterDesiredConfigs = 
cluster.getDesiredConfigs(); createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags, - roleCommand, null, null, false, isUpgradeSuspended, databaseType, + roleCommand, null, null, false, repoVersion, isUpgradeSuspended, databaseType, clusterDesiredConfigs); ExecutionCommand ec = stage.getExecutionCommands().get(scHost.getHostName()).get(0).getExecutionCommand(); @@ -3563,14 +3562,10 @@ public void validateServiceComponentHostRequest(ServiceComponentHostRequest requ || request.getClusterName().isEmpty() || request.getComponentName() == null || request.getComponentName().isEmpty() - || request.getServiceName() == null - || request.getServiceName().isEmpty() - || request.getServiceGroupName() == null - || request.getServiceGroupName().isEmpty() || request.getHostname() == null || request.getHostname().isEmpty()) { throw new IllegalArgumentException("Invalid arguments" - + ", cluster name, component name, service name, service group name and host name should be" + + ", cluster name, component name and host name should be" + " provided"); } @@ -3600,11 +3595,6 @@ public String findService(Cluster cluster, String componentName) throws AmbariEx return cluster.getServiceByComponentName(componentName).getName(); } - @Override - public String findService(Cluster cluster, Long componentId) throws AmbariException { - return cluster.getServiceByComponentId(componentId).getName(); - } - @Override public synchronized void deleteCluster(ClusterRequest request) throws AmbariException { @@ -3645,8 +3635,8 @@ public DeleteStatusMetaData deleteHostComponents( for (ServiceComponentHost sch : cluster.getServiceComponentHosts(request.getHostname())) { ServiceComponentHostRequest schr = new ServiceComponentHostRequest(request.getClusterName(), - sch.getServiceGroupName(), sch.getServiceName(), sch.getServiceComponentId(), sch.getServiceComponentName(), - sch.getServiceComponentType(), sch.getHostName(), null); + request.getServiceGroupName(), sch.getServiceName(), sch.getServiceComponentName(), + sch.getHostName(), null); expanded.add(schr); } } @@ -3672,7 +3662,6 @@ public DeleteStatusMetaData deleteHostComponents( + ", clusterName=" + request.getClusterName() + ", serviceName=" + request.getServiceName() + ", componentName=" + request.getComponentName() - + ", componentType=" + request.getComponentType() + ", hostname=" + request.getHostname() + ", request=" + request); @@ -5089,6 +5078,28 @@ public String getMysqljdbcUrl() { return mysqljdbcUrl; } + @Override + public Map getRcaParameters() { + + String hostName = StageUtils.getHostName(); + + String url = configs.getRcaDatabaseUrl(); + if (url.contains(Configuration.HOSTNAME_MACRO)) { + url = + url.replace(Configuration.HOSTNAME_MACRO, + hostsMap.getHostMap(hostName)); + } + + Map rcaParameters = new HashMap<>(); + + rcaParameters.put(AMBARI_DB_RCA_URL, url); + rcaParameters.put(AMBARI_DB_RCA_DRIVER, configs.getRcaDatabaseDriver()); + rcaParameters.put(AMBARI_DB_RCA_USERNAME, configs.getRcaDatabaseUser()); + rcaParameters.put(AMBARI_DB_RCA_PASSWORD, configs.getRcaDatabasePassword()); + + return rcaParameters; + } + @Override public boolean checkLdapConfigured() { return ldapDataPopulator.isLdapEnabled(); @@ -5793,7 +5804,7 @@ public Set createServiceConfigVersion(Set existingConfigTypeToConfig = new HashMap<>(); + Map existingConfigTypeToConfig = new HashMap(); for (Config config : configs) { Config existingConfig = cluster.getDesiredConfigByType(config.getType()); existingConfigTypeToConfig.put(config.getType(), 
existingConfig); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java index f7f26833108..e93277e541f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java @@ -81,6 +81,7 @@ import org.apache.ambari.server.metrics.system.MetricsService; import org.apache.ambari.server.orm.GuiceJpaInitializer; import org.apache.ambari.server.orm.PersistenceType; +import org.apache.ambari.server.orm.dao.BlueprintDAO; import org.apache.ambari.server.orm.dao.ClusterDAO; import org.apache.ambari.server.orm.dao.GroupDAO; import org.apache.ambari.server.orm.dao.MetainfoDAO; @@ -113,6 +114,7 @@ import org.apache.ambari.server.security.unsecured.rest.CertificateDownload; import org.apache.ambari.server.security.unsecured.rest.CertificateSign; import org.apache.ambari.server.security.unsecured.rest.ConnectionInfo; +import org.apache.ambari.server.stack.UpdateActiveRepoVersionOnStartup; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.topology.AmbariContext; import org.apache.ambari.server.topology.BlueprintFactory; @@ -946,6 +948,7 @@ public void performStaticInjection() { injector.getInstance(TopologyRequestFactoryImpl.class), injector.getInstance(SecurityConfigurationFactory .class), injector.getInstance(Gson.class)); HostResourceProvider.setTopologyManager(injector.getInstance(TopologyManager.class)); + BlueprintFactory.init(injector.getInstance(BlueprintDAO.class)); BaseClusterRequest.init(injector.getInstance(BlueprintFactory.class)); AmbariContext.init(injector.getInstance(HostRoleCommandFactory.class)); @@ -1074,7 +1077,7 @@ private static void loadRequestlogHandler(AmbariHandlerList handlerList, Server HandlerCollection handlers = new HandlerCollection(); Handler[] handler = serverForAgent.getHandlers(); if(handler != null ) { - handlers.setHandlers(handler); + handlers.setHandlers((Handler[])handler); handlers.addHandler(requestLogHandler); serverForAgent.setHandler(handlers); } @@ -1105,6 +1108,7 @@ public static void main(String[] args) throws Exception { DatabaseConsistencyCheckHelper.checkDBVersionCompatible(); server = injector.getInstance(AmbariServer.class); + injector.getInstance(UpdateActiveRepoVersionOnStartup.class).process(); CertificateManager certMan = injector.getInstance(CertificateManager.class); certMan.initRootCert(); KerberosChecker.checkJaasConfiguration(); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java index 0ae2676ee5a..357f1b61d86 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java @@ -169,16 +169,12 @@ import org.apache.ambari.server.state.stack.OsFamily; import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl; import org.apache.ambari.server.topology.BlueprintFactory; -import org.apache.ambari.server.topology.ComponentResolver; -import org.apache.ambari.server.topology.DefaultStackFactory; +import org.apache.ambari.server.topology.BlueprintValidator; +import org.apache.ambari.server.topology.BlueprintValidatorImpl; import org.apache.ambari.server.topology.PersistedState; import 
org.apache.ambari.server.topology.PersistedStateImpl; import org.apache.ambari.server.topology.SecurityConfigurationFactory; -import org.apache.ambari.server.topology.StackComponentResolver; -import org.apache.ambari.server.topology.StackFactory; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.BasicBlueprintValidator; -import org.apache.ambari.server.topology.validators.BlueprintValidator; import org.apache.ambari.server.utils.PasswordUtils; import org.apache.ambari.server.view.ViewInstanceHandlerList; import org.eclipse.jetty.server.SessionIdManager; @@ -418,7 +414,6 @@ protected void configure() { bind(SecurityConfigurationFactory.class).in(Scopes.SINGLETON); bind(PersistedState.class).to(PersistedStateImpl.class); - bind(ComponentResolver.class).to(StackComponentResolver.class); // factory to create LoggingRequestHelper instances for LogSearch integration bind(LoggingRequestHelperFactory.class).to(LoggingRequestHelperFactoryImpl.class); @@ -546,8 +541,7 @@ private void installFactories() { bind(RegistryFactory.class).to(RegistryFactoryImpl.class); bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class); bind(SecurityHelper.class).toInstance(SecurityHelperImpl.getInstance()); - bind(BlueprintValidator.class).to(BasicBlueprintValidator.class); - bind(StackFactory.class).to(DefaultStackFactory.class); + bind(BlueprintValidator.class).to(BlueprintValidatorImpl.class); bind(BlueprintFactory.class); install(new FactoryModuleBuilder().implement(AmbariEvent.class, Names.named("userCreated"), UserCreatedEvent.class).build(AmbariEventFactory.class)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java index f9d5c5701fa..0824d5361cf 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java @@ -26,9 +26,7 @@ public class ServiceComponentHostRequest { private String clusterName; // REF private String serviceGroupName; private String serviceName; - private Long componentId; private String componentName; - private String componentType; private String hostname; private String publicHostname; private String state; @@ -41,31 +39,17 @@ public class ServiceComponentHostRequest { public ServiceComponentHostRequest(String clusterName, String serviceGroupName, String serviceName, - Long componentId, String componentName, - String componentType, String hostname, String desiredState) { this.clusterName = clusterName; this.serviceGroupName = serviceGroupName; this.serviceName = serviceName; - this.componentId = componentId; this.componentName = componentName; - this.componentType = componentType; this.hostname = hostname; this.desiredState = desiredState; } - public ServiceComponentHostRequest(String clusterName, - String serviceGroupName, - String serviceName, - String componentName, - String componentType, - String hostname, - String desiredState) { - this(clusterName, serviceGroupName, serviceName, null, componentName, componentType, hostname, desiredState); - } - /** * @return the service group Name */ @@ -90,13 +74,6 @@ public void setServiceName(String serviceName) { this.serviceName = serviceName; } - /** - * @return the componentd - */ - public Long getComponentId() { - return componentId; - } - /** * 
@return the componentName */ @@ -111,22 +88,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - /** - * @param componentId the componentId to set - */ - public void setComponentId(Long componentId) { - this.componentId = componentId; - } - - /** - * @return the componentType - */ - public String getComponentType() { return componentType; } - - /** - * @param componentType the componenType to set - */ - public void setComponentType(String componentType) { this.componentType = componentType; } /** * @return the hostname */ @@ -201,9 +162,7 @@ public String toString() { sb.append("{" + " clusterName=").append(clusterName) .append(", serviceGroupName=").append(serviceGroupName) .append(", serviceName=").append(serviceName) - .append(", componentId=").append(componentId) .append(", componentName=").append(componentName) - .append(", componentType=").append(componentType) .append(", hostname=").append(hostname) .append(", publicHostname=").append(publicHostname) .append(", desiredState=").append(desiredState) @@ -244,9 +203,7 @@ public boolean equals(Object obj) { return Objects.equals(clusterName, other.clusterName) && Objects.equals(serviceGroupName, other.serviceGroupName) && Objects.equals(serviceName, other.serviceName) && - Objects.equals(componentId, other.componentId) && Objects.equals(componentName, other.componentName) && - Objects.equals(componentType, other.componentType) && Objects.equals(hostname, other.hostname) && Objects.equals(publicHostname, other.publicHostname) && Objects.equals(desiredState, other.desiredState) && @@ -259,7 +216,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(clusterName, serviceGroupName, serviceName, componentId, componentName, componentType, hostname, - publicHostname, desiredState, state, desiredStackId, staleConfig, adminState, maintenanceState); + return Objects.hash(clusterName, serviceGroupName, serviceName, componentName, hostname, publicHostname, + desiredState, state, desiredStackId, staleConfig, adminState, maintenanceState); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java index 76d187dfc4e..14f1d471130 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java @@ -24,8 +24,6 @@ import org.apache.ambari.server.state.HostConfig; import org.apache.ambari.server.state.UpgradeState; -import io.swagger.annotations.ApiModelProperty; - public class ServiceComponentHostResponse { private Long clusterId; // REF @@ -37,7 +35,6 @@ public class ServiceComponentHostResponse { private String serviceType; private Long hostComponentId; private String componentName; - private String componentType; private String displayName; private String publicHostname; private String hostname; @@ -56,10 +53,9 @@ public class ServiceComponentHostResponse { public ServiceComponentHostResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName, Long serviceId, String serviceName, String serviceType, Long hostComponentId, - String componentName, String componentType, String displayName, String hostname, - String publicHostname, String liveState, String version, String desiredState, - String desiredStackVersion, String 
desiredRepositoryVersion, - HostComponentAdminState adminState) { + String componentName, String displayName, String hostname, String publicHostname, + String liveState, String version, String desiredState, String desiredStackVersion, + String desiredRepositoryVersion, HostComponentAdminState adminState) { this.clusterId = clusterId; this.serviceGroupId = serviceGroupId; this.serviceGroupName = serviceGroupName; @@ -69,7 +65,6 @@ public ServiceComponentHostResponse(Long clusterId, String clusterName, Long ser this.serviceType = serviceType; this.hostComponentId = hostComponentId; this.componentName = componentName; - this.componentType = componentType; this.displayName = displayName; this.hostname = hostname; this.publicHostname = publicHostname; @@ -154,13 +149,6 @@ public String getComponentName() { return componentName; } - /** - * @return the componentType - */ - public String getComponentType() { - return componentType; - } - /** * @param componentName the componentName to set */ @@ -168,13 +156,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - /** - * @param componentType the componentType to set - */ - public void setComponentType(String componentType) { - this.componentType = componentType; - } - /** * @return the displayName */ @@ -358,11 +339,6 @@ public boolean equals(Object o) { return false; } - if (componentType != null ? - !componentType.equals(that.componentType) : that.componentType != null) { - return false; - } - if (displayName != null ? !displayName.equals(that.displayName) : that.displayName != null) { return false; @@ -386,7 +362,6 @@ public int hashCode() { result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0); result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0); result = 71 * result + (componentName != null ? componentName.hashCode() : 0); - result = 71 * result + (componentType != null ? componentType.hashCode() : 0); result = 71 * result + (displayName != null ? displayName.hashCode() : 0); result = 71 * result + (hostname != null ? 
hostname.hashCode() : 0); return result; @@ -462,13 +437,4 @@ public UpgradeState getUpgradeState() { return upgradeState; } - /** - * Interface to help correct Swagger documentation generation - */ - public interface ServiceComponentHostResponseSwagger extends ApiModel { - @ApiModelProperty(name = "HostRoles") - ServiceComponentHostResponse getServiceComponentHostResponse(); - } - - } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java index 12fa03c26a0..f59eb984e7f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java @@ -19,38 +19,34 @@ package org.apache.ambari.server.controller; -import java.util.Objects; - public class ServiceComponentRequest { private String clusterName; // REF private String serviceGroupName; private String serviceName; // GET/CREATE/UPDATE/DELETE private String componentName; // GET/CREATE/UPDATE/DELETE - private String componentType; private String desiredState; // CREATE/UPDATE private String componentCategory; private String recoveryEnabled; // CREATE/UPDATE public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName, - String componentName, String componentType, String desiredState) { - this(clusterName, serviceGroupName, serviceName, componentName, componentType, desiredState, null, null); + String componentName, String desiredState) { + this(clusterName, serviceGroupName, serviceName, componentName, desiredState, null, null); } public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName, String componentName, - String componentType, String desiredState, String recoveryEnabled) { - this(clusterName, serviceGroupName, serviceName, componentName, componentType, desiredState, recoveryEnabled, null); + String desiredState, String recoveryEnabled) { + this(clusterName, serviceGroupName, serviceName, componentName, desiredState, recoveryEnabled, null); } public ServiceComponentRequest(String clusterName, String serviceGroupName, - String serviceName, String componentName, String componentType, + String serviceName, String componentName, String desiredState, String recoveryEnabled, String componentCategory) { this.clusterName = clusterName; this.serviceGroupName = serviceGroupName; this.serviceName = serviceName; this.componentName = componentName; - this.componentType = componentType; this.desiredState = desiredState; this.recoveryEnabled = recoveryEnabled; this.componentCategory = componentCategory; @@ -93,18 +89,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - /** - * @return the componentType - */ - public String getComponentType() { return componentType; } - - /** - * @param componentType the componentType to set - */ - public void setComponentType(String componentType) { - this.componentType = componentType; - } - /** * @return the desiredState */ @@ -157,33 +141,8 @@ public void setComponentCategory(String componentCategory) { @Override public String toString() { - return String.format("[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s, componentType=%s, " + + return String.format("[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s, " + "desiredState=%s, recoveryEnabled=%s, componentCategory=%s]", 
clusterName, serviceGroupName, - serviceName, componentName, componentType, desiredState, recoveryEnabled, componentCategory); - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - ServiceComponentRequest other = (ServiceComponentRequest) obj; - - return Objects.equals(clusterName, other.clusterName) && - Objects.equals(serviceGroupName, other.serviceGroupName) && - Objects.equals(serviceName, other.serviceName) && - Objects.equals(componentCategory, other.componentCategory) && - Objects.equals(componentName, other.componentName) && - Objects.equals(desiredState, other.desiredState) && - Objects.equals(recoveryEnabled, other.recoveryEnabled); - } - - @Override - public int hashCode() { - return Objects.hash(clusterName, serviceGroupName, serviceName, componentCategory, componentName, desiredState, recoveryEnabled); + serviceName, componentName, desiredState, recoveryEnabled, componentCategory); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java index 85dc55f33be..d63b33c1ef0 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java @@ -33,9 +33,7 @@ public class ServiceComponentResponse { private Long serviceId; // REF private String serviceName; private String serviceType; - private Long componentId; private String componentName; - private String componentType; private String displayName; private String desiredStackId; private String desiredState; @@ -46,10 +44,10 @@ private RepositoryVersionState repoState; public ServiceComponentResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName, - Long serviceId, String serviceName, String serviceType, Long componentId, String componentName, - String componentType, StackId desiredStackId, String desiredState, - Map<String, Integer> serviceComponentStateCount, boolean recoveryEnabled, - String displayName, String desiredVersion, RepositoryVersionState repoState) { + Long serviceId, String serviceName, String serviceType, String componentName, + StackId desiredStackId, String desiredState, Map<String, Integer> serviceComponentStateCount, + boolean recoveryEnabled, String displayName, String desiredVersion, + RepositoryVersionState repoState) { this.clusterId = clusterId; this.clusterName = clusterName; this.serviceGroupId = serviceGroupId; @@ -57,9 +55,7 @@ public ServiceComponentResponse(Long clusterId, String clusterName, Long service this.serviceId = serviceId; this.serviceName = serviceName; this.serviceType = serviceType; - this.componentId = componentId; this.componentName = componentName; - this.componentType = componentType; this.displayName = displayName; this.desiredStackId = desiredStackId.getStackId(); this.desiredState = desiredState; @@ -137,34 +133,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - /** - * @param componentId the componentId to set - */ - public void setComponentName(Long componentId) { - this.componentId = componentId; - } - - /** - * @return the componentType - */ - public String getComponentType() { - return componentType; - } - - /** - * @param componentType the componentType to set - */ - public void
setComponentType(String componentType) { - this.componentType = componentType; - } - - /** - * @return the componentId - */ - public Long getComponentId() { - return componentId; - } - /** * @return the displayName */ @@ -325,21 +293,11 @@ public boolean equals(Object o) { return false; } - if (componentId != null ? - !componentId.equals(that.componentId) : that.componentId != null) { - return false; - } - if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null){ return false; } - if (componentType != null ? - !componentType.equals(that.componentType) : that.componentType != null){ - return false; - } - if (displayName != null ? !displayName.equals(that.displayName) : that.displayName != null) { return false; @@ -357,9 +315,7 @@ public int hashCode() { result = 71 * result + (serviceId != null ? serviceId.hashCode() : 0); result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0); result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0); - result = 71 * result + (componentId != null ? componentId.hashCode() : 0); result = 71 * result + (componentName != null ? componentName.hashCode():0); - result = 71 * result + (componentType != null ? componentType.hashCode():0); result = 71 * result + (displayName != null ? displayName.hashCode():0); return result; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java index 4516b555069..70e5240c38a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java @@ -17,8 +17,6 @@ */ package org.apache.ambari.server.controller; -import java.util.Objects; - import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.state.StackId; @@ -198,32 +196,4 @@ public String toString() { public void setResolvedRepository(RepositoryVersionEntity repositoryVersion) { resolvedRepository = repositoryVersion; } public RepositoryVersionEntity getResolvedRepository() { return resolvedRepository; } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - ServiceRequest other = (ServiceRequest) obj; - - return Objects.equals(clusterName, other.clusterName) && - Objects.equals(serviceGroupName, other.serviceGroupName) && - Objects.equals(serviceType, other.serviceType) && - Objects.equals(serviceName, other.serviceName) && - Objects.equals(desiredState, other.desiredState) && - Objects.equals(maintenanceState, other.maintenanceState) && - Objects.equals(credentialStoreEnabled, other.credentialStoreEnabled) && - Objects.equals(credentialStoreSupported, other.credentialStoreSupported) && - Objects.equals(desiredStackId, other.desiredStackId) && - Objects.equals(desiredRepositoryVersionId, other.desiredRepositoryVersionId); - } - - @Override - public int hashCode() { - return Objects.hash(clusterName, serviceGroupName, serviceType, serviceName, desiredState, maintenanceState, credentialStoreEnabled, credentialStoreSupported, desiredStackId, desiredRepositoryVersionId); - } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java index 65cd5734568..73a2c93a9ca 100644 --- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java @@ -18,8 +18,6 @@ package org.apache.ambari.server.controller; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.state.RepositoryVersionState; import org.apache.ambari.server.state.StackId; @@ -193,12 +191,8 @@ public RepositoryVersionState getRepositoryVersionState() { @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; ServiceResponse that = (ServiceResponse) o; @@ -308,16 +302,12 @@ public interface ServiceResponseSwagger extends ApiModel { /** * @param id */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setDesiredRepositoryVersionId(Long id) { desiredRepositoryVersionId = id; } /** */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public Long getDesiredRepositoryVersionId() { return desiredRepositoryVersionId; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java index b30cd333ee8..f385f7ef3fa 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java @@ -505,17 +505,10 @@ public Host getHost(String clusterName, String hostName) { public boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException { final String collectorHostName = getCollectorHostName(clusterName, service); - Long componentId = null; - try { - componentId = managementController.getClusters().getCluster(clusterName).getComponentId(Role.METRICS_COLLECTOR.name()); - } catch (AmbariException e) { - e.printStackTrace(); - } if (service.equals(GANGLIA)) { - // TODO : Multi_Metrics_Changes. Is there is more than one instance of GANGLIA_SERVER, type and name would be different. 
return HostStatusHelper.isHostComponentLive(managementController, clusterName, collectorHostName, "GANGLIA", - componentId, Role.GANGLIA_SERVER.name(), Role.GANGLIA_SERVER.name()); + Role.GANGLIA_SERVER.name()); } else if (service.equals(TIMELINE_METRICS)) { return metricsCollectorHAManager.isCollectorComponentLive(clusterName); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java index 0d243a2f551..77eafebf4bd 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java @@ -29,7 +29,6 @@ import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.spi.ResourceProvider; import org.apache.ambari.server.controller.utilities.ClusterControllerHelper; -import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.BlueprintFactory; import org.apache.ambari.server.topology.Configuration; @@ -37,7 +36,6 @@ import org.apache.ambari.server.topology.InvalidTopologyTemplateException; import org.apache.ambari.server.topology.SecurityConfiguration; import org.apache.ambari.server.topology.TopologyRequest; -import org.apache.ambari.server.topology.TopologyRequestUtil; /** * Provides common cluster request functionality. @@ -55,11 +53,6 @@ public abstract class BaseClusterRequest implements TopologyRequest { protected ProvisionAction provisionAction; - /** - * The raw request body. We would like to persist it. - */ - protected String rawRequestBody; - /** * cluster id */ @@ -125,19 +118,6 @@ public Map getHostGroupInfo() { return hostGroupInfoMap; } - /** - * @return the raw request body in JSON string - */ - public String getRawRequestBody() { - return rawRequestBody; - } - - @Override - public Set getStackIds() { - return TopologyRequestUtil.getStackIdsFromRequest( - TopologyRequestUtil.getPropertyMap(rawRequestBody)); - } - /** * Validate that all properties specified in the predicate are valid for the Host resource. * @@ -200,7 +180,6 @@ public SecurityConfiguration getSecurityConfiguration() { return securityConfiguration; } - /** * Get the host resource provider instance. 
* diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java index 5ea81c34418..8da1f51ee2c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java @@ -44,10 +44,12 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.ValueAttributesInfo; import org.apache.ambari.server.topology.AdvisedConfiguration; +import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.Cardinality; import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.ConfigRecommendationStrategy; import org.apache.ambari.server.topology.Configuration; +import org.apache.ambari.server.topology.HostGroup; import org.apache.ambari.server.topology.HostGroupInfo; import org.apache.ambari.server.topology.validators.UnitValidatedProperty; import org.apache.commons.lang.StringUtils; @@ -69,11 +71,6 @@ public class BlueprintConfigurationProcessor { private static final Logger LOG = LoggerFactory.getLogger(BlueprintConfigurationProcessor.class); - /** - * Compiled regex for "%HOSTGROUP::...%" token. - */ - public static final Pattern HOST_GROUP_PLACEHOLDER_PATTERN = Pattern.compile("%HOSTGROUP::(\\S+?)%"); - private final static String COMMAND_RETRY_ENABLED_PROPERTY_NAME = "command_retry_enabled"; private final static String COMMANDS_TO_RETRY_PROPERTY_NAME = "commands_to_retry"; @@ -168,23 +165,6 @@ public class BlueprintConfigurationProcessor { private static Set configPropertiesWithHASupport = new HashSet<>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir")); - public static boolean isNameNodeHAEnabled(Map> configurationProperties) { - return configurationProperties.containsKey("hdfs-site") && - (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") || - configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices")); - } - - /** - * Static convenience function to determine if Yarn ResourceManager HA is enabled - * @param configProperties configuration properties for this cluster - * @return true if Yarn ResourceManager HA is enabled - * false if Yarn ResourceManager HA is not enabled - */ - public static boolean isYarnResourceManagerHAEnabled(Map> configProperties) { - return configProperties.containsKey("yarn-site") && configProperties.get("yarn-site").containsKey("yarn.resourcemanager.ha.enabled") - && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true"); - } - /** * Statically-defined list of filters to apply on property exports. * This will initially be used to filter out the Ranger Passwords, but @@ -299,7 +279,7 @@ private boolean containsHostFromHostGroups(String configType, String propertyNam return false; } // check fir bp import - Matcher m = HOST_GROUP_PLACEHOLDER_PATTERN.matcher(propertyValue); + Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(propertyValue); if (m.find()) { return true; } @@ -370,8 +350,7 @@ public Set doUpdateForClusterCreate() throws ConfigurationTopologyExcept // set of properties (copy) doesn't include the removed properties. 
If an updater // removes a property other than the property it is registered for then we will // have an issue as it won't be removed from the clusterProps map as it is a copy. - Map<String, Map<String, String>> configProperties = clusterConfig.getFullProperties(); - Map<String, Map<String, String>> clusterProps = configProperties; + Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties(); for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) { for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) { String type = entry.getKey(); @@ -419,10 +398,10 @@ } //todo: lots of hard coded HA rules included here - if (isNameNodeHAEnabled(configProperties)) { + if (clusterTopology.isNameNodeHAEnabled()) { // add "dfs.internal.nameservices" if it's not specified - Map<String, String> hdfsSiteConfig = configProperties.get("hdfs-site"); + Map<String, String> hdfsSiteConfig = clusterConfig.getFullProperties().get("hdfs-site"); String nameservices = hdfsSiteConfig.get("dfs.nameservices"); String int_nameservices = hdfsSiteConfig.get("dfs.internal.nameservices"); if(int_nameservices == null && nameservices != null) { @@ -451,7 +430,7 @@ setStackToolsAndFeatures(clusterConfig, configTypesUpdated); setRetryConfiguration(clusterConfig, configTypesUpdated); setupHDFSProxyUsers(clusterConfig, configTypesUpdated); - addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getStack()); + addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack()); trimProperties(clusterConfig, clusterTopology); @@ -459,7 +438,8 @@ } private void trimProperties(Configuration clusterConfig, ClusterTopology clusterTopology) { - StackDefinition stack = clusterTopology.getStack(); + Blueprint blueprint = clusterTopology.getBlueprint(); + StackDefinition stack = blueprint.getStack(); Map<String, Map<String, String>> configTypes = clusterConfig.getFullProperties(); for (String configType : configTypes.keySet()) { @@ -507,16 +487,15 @@ private static boolean shouldPropertyBeStoredWithDefault(String propertyName) { */ public void doUpdateForBlueprintExport() { // HA configs are only processed in cluster configuration, not HG configurations - Map<String, Map<String, String>> configProperties = clusterTopology.getConfiguration().getFullProperties(); - if (isNameNodeHAEnabled(configProperties)) { + if (clusterTopology.isNameNodeHAEnabled()) { doNameNodeHAUpdate(); } - if (isYarnResourceManagerHAEnabled(configProperties)) { + if (clusterTopology.isYarnResourceManagerHAEnabled()) { doYarnResourceManagerHAUpdate(); } - if (isOozieServerHAEnabled(configProperties)) { + if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) { doOozieServerHAUpdate(); } @@ -562,7 +541,7 @@ private void doFilterPriorToExport(Configuration configuration) { String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId()); Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName); authToLocalPerClusterMap = new HashMap<>(); - authToLocalPerClusterMap.put(clusterTopology.getClusterId(), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster, false).getAllAuthToLocalProperties()); + authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster,
false).getAllAuthToLocalProperties()); } catch (AmbariException e) { LOG.error("Error while getting authToLocal properties. ", e); } @@ -570,7 +549,7 @@ private void doFilterPriorToExport(Configuration configuration) { for (Map.Entry> configEntry : properties.entrySet()) { String type = configEntry.getKey(); try { - clusterTopology.getStack().getServiceForConfigType(type); + clusterTopology.getBlueprint().getStack().getServiceForConfigType(type); } catch (IllegalArgumentException illegalArgumentException) { LOG.error(new StringBuilder(String.format("Error encountered while trying to obtain the service name for config type [%s]. ", type)) .append("Further processing on this config type will be skipped. ") @@ -644,7 +623,8 @@ private void doRecommendConfigurations(Configuration configuration, Set * @param advisedConfigurations advised configuration instance */ private void doFilterStackDefaults(Map advisedConfigurations) { - Configuration stackDefaults = clusterTopology.getStack().getConfiguration(clusterTopology.getServices()); + Blueprint blueprint = clusterTopology.getBlueprint(); + Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices()); Map> stackDefaultProps = stackDefaults.getProperties(); for (Map.Entry adConfEntry : advisedConfigurations.entrySet()) { AdvisedConfiguration advisedConfiguration = adConfEntry.getValue(); @@ -722,16 +702,15 @@ private void doRemovePropertiesIfNeeded(Configuration configuration, private Collection>> createCollectionOfUpdaters() { Collection>> updaters = allUpdaters; - Map> configProperties = clusterTopology.getConfiguration().getFullProperties(); - if (isNameNodeHAEnabled(configProperties)) { + if (clusterTopology.isNameNodeHAEnabled()) { updaters = addNameNodeHAUpdaters(updaters); } - if (isYarnResourceManagerHAEnabled(configProperties)) { + if (clusterTopology.isYarnResourceManagerHAEnabled()) { updaters = addYarnResourceManagerHAUpdaters(updaters); } - if (isOozieServerHAEnabled(configProperties)) { + if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) { updaters = addOozieServerHAUpdaters(updaters); } @@ -1402,7 +1381,7 @@ public String updateForClusterCreate(String propertyName, ClusterTopology topology) { //todo: getHostStrings - Matcher m = HOST_GROUP_PLACEHOLDER_PATTERN.matcher(origValue); + Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue); if (m.find()) { String hostGroupName = m.group(1); @@ -1427,7 +1406,7 @@ public Collection getRequiredHostGroups(String propertyName, Map> properties, ClusterTopology topology) { //todo: getHostStrings - Matcher m = HOST_GROUP_PLACEHOLDER_PATTERN.matcher(origValue); + Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue); if (m.find()) { String hostGroupName = m.group(1); return Collections.singleton(hostGroupName); @@ -1482,7 +1461,7 @@ public String updateForClusterCreate(String propertyName, topology.getHostAssignmentsForComponent(component).iterator().next(), properties); } else { //todo: extract all hard coded HA logic - Cardinality cardinality = topology.getStack().getCardinality(component); + Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component); // if no matching host groups are found for a component whose configuration // is handled by this updater, check the stack first to determine if // zero is a valid cardinality for this component. 
This is necessary @@ -1491,8 +1470,7 @@ public String updateForClusterCreate(String propertyName, if (matchingGroupCount == 0 && cardinality.isValidCount(0)) { return origValue; } else { - Map> configProperties = topology.getConfiguration().getFullProperties(); - if (isComponentNameNode() && (matchingGroupCount == 2) && isNameNodeHAEnabled(configProperties)) { + if (topology.isNameNodeHAEnabled() && isComponentNameNode() && (matchingGroupCount == 2)) { // if this is the defaultFS property, it should reflect the nameservice name, // rather than a hostname (used in non-HA scenarios) if (properties.containsKey("core-site") && properties.get("core-site").get("fs.defaultFS").equals(origValue)) { @@ -1518,13 +1496,13 @@ public String updateForClusterCreate(String propertyName, } - if (isComponentSecondaryNameNode() && (matchingGroupCount == 0) && isNameNodeHAEnabled(configProperties)) { + if (topology.isNameNodeHAEnabled() && isComponentSecondaryNameNode() && (matchingGroupCount == 0)) { // if HDFS HA is enabled, then no replacement is necessary for properties that refer to the SECONDARY_NAMENODE // eventually this type of information should be encoded in the stacks return origValue; } - if (isComponentResourceManager() && (matchingGroupCount == 2) && isYarnResourceManagerHAEnabled(configProperties)) { + if (topology.isYarnResourceManagerHAEnabled() && isComponentResourceManager() && (matchingGroupCount == 2)) { if (!origValue.contains("localhost")) { // if this Yarn property is a FQDN, then simply return it return origValue; @@ -1592,7 +1570,7 @@ public Collection getRequiredHostGroups(String propertyName, if (matchingGroupCount != 0) { return new HashSet<>(matchingGroups); } else { - Cardinality cardinality = topology.getStack().getCardinality(component); + Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component); // if no matching host groups are found for a component whose configuration // is handled by this updater, return an empty set if (! cardinality.isValidCount(0)) { @@ -2086,7 +2064,7 @@ public Collection getRequiredHostGroups(String propertyName, while (m.find()) { String groupName = m.group(1); - if (!topology.getHostGroupInfo().containsKey(groupName)) { + if (!topology.getBlueprint().getHostGroups().containsKey(groupName)) { throw new IllegalArgumentException( "Unable to match blueprint host group token to a host group: " + groupName); } @@ -2604,7 +2582,7 @@ public String updateForClusterCreate(String propertyName, } } - boolean isAtlasInCluster = topology.getServices().contains("ATLAS"); + boolean isAtlasInCluster = topology.getBlueprint().getServices().contains("ATLAS"); boolean isAtlasHiveHookEnabled = Boolean.parseBoolean(properties.get("hive-env").get("hive.atlas.hook")); // Append atlas hook if not already present. 
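For reference, the BlueprintConfigurationProcessor hunks above and below uniformly reroute stack lookups through the blueprint again. A minimal sketch of the restored access pattern; "topology" and "component" are illustrative local names, the types are the Ambari classes touched by this patch:

    // Sketch only, assuming a ClusterTopology instance named "topology" is in scope.
    StackDefinition stack = topology.getBlueprint().getStack();            // was: topology.getStack()
    Cardinality cardinality = stack.getCardinality(component);             // same call, new access path
    boolean nameNodeHA = topology.isNameNodeHAEnabled();                   // was: static isNameNodeHAEnabled(configProperties)
    boolean resourceManagerHA = topology.isYarnResourceManagerHAEnabled(); // was: static isYarnResourceManagerHAEnabled(configProperties)
    Collection<String> services = topology.getBlueprint().getServices();   // was: topology.getServices()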
@@ -2635,7 +2613,7 @@ public String updateForClusterCreate(String propertyName, Map> properties, ClusterTopology topology) { - if (topology.getServices().contains("ATLAS")) { + if (topology.getBlueprint().getServices().contains("ATLAS")) { // if original value is not set or is the default "primary" set the cluster id if (origValue == null || origValue.trim().isEmpty() || origValue.equals("primary")) { //use cluster id because cluster name may change @@ -2670,7 +2648,7 @@ public String updateForClusterCreate(String propertyName, String origValue, Map> properties, ClusterTopology topology) { - if (topology.getServices().contains("ATLAS")) { + if (topology.getBlueprint().getServices().contains("ATLAS")) { String host = topology.getHostAssignmentsForComponent("ATLAS_SERVER").iterator().next(); boolean tlsEnabled = Boolean.parseBoolean(properties.get("application-properties").get("atlas.enableTLS")); @@ -2728,7 +2706,7 @@ public String updateForClusterCreate(String propertyName, Map> properties, ClusterTopology topology) { - if (topology.getServices().contains("AMBARI_METRICS")) { + if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) { final String amsReporterClass = "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"; if (origValue == null || origValue.isEmpty()) { return amsReporterClass; @@ -2759,7 +2737,7 @@ public String updateForClusterCreate(String propertyName, Map> properties, ClusterTopology topology) { - if (topology.getServices().contains("AMBARI_METRICS")) { + if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) { final String amsReportesClass = "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter"; if (origValue == null || origValue.isEmpty()) { return amsReportesClass; @@ -2851,7 +2829,7 @@ private Collection setupHDFSProxyUsers(Configuration configuration, Set< // AMBARI-5206 final Map userProps = new HashMap<>(); - Collection services = clusterTopology.getServices(); + Collection services = clusterTopology.getBlueprint().getServices(); if (services.contains("HDFS")) { // only add user properties to the map for // services actually included in the blueprint definition @@ -2905,7 +2883,7 @@ private Collection setupHDFSProxyUsers(Configuration configuration, Set< * @param stack */ private void addExcludedConfigProperties(Configuration configuration, Set configTypesUpdated, StackDefinition stack) { - Collection blueprintServices = clusterTopology.getServices(); + Collection blueprintServices = clusterTopology.getBlueprint().getServices(); LOG.debug("Handling excluded properties for blueprint services: {}", blueprintServices); @@ -3009,7 +2987,7 @@ private void setStackToolsAndFeatures(Configuration configuration, Set c ); try { - for (StackId stackId : clusterTopology.getStackIds()) { + for (StackId stackId : clusterTopology.getBlueprint().getStackIds()) { Map> defaultStackProperties = configHelper.getDefaultStackProperties(stackId); if (defaultStackProperties.containsKey(CLUSTER_ENV_CONFIG_TYPE_NAME)) { Map clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME); @@ -3128,7 +3106,7 @@ private static class StackPropertyTypeFilter implements PropertyFilter { */ @Override public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) { - StackDefinition stack = topology.getStack(); + StackDefinition stack = topology.getBlueprint().getStack(); final String serviceName = stack.getServiceForConfigType(configType); return 
!(stack.isPasswordProperty(serviceName, configType, propertyName) || stack.isKerberosPrincipalNameProperty(serviceName, configType, propertyName)); @@ -3225,7 +3203,7 @@ private static abstract class DependencyFilter implements PropertyFilter { */ @Override public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) { - StackDefinition stack = topology.getStack(); + StackDefinition stack = topology.getBlueprint().getStack(); Configuration configuration = topology.getConfiguration(); final String serviceName = stack.getServiceForConfigType(configType); @@ -3339,7 +3317,7 @@ private static class HDFSNameNodeHAFilter implements PropertyFilter { * namenode. */ private final Set setOfHDFSPropertyNamesNonHA = - ImmutableSet.of("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address"); + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address"))); /** @@ -3354,8 +3332,13 @@ private static class HDFSNameNodeHAFilter implements PropertyFilter { */ @Override public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) { - return !setOfHDFSPropertyNamesNonHA.contains(propertyName) - || !isNameNodeHAEnabled(topology.getConfiguration().getFullProperties()); + if (topology.isNameNodeHAEnabled()) { + if (setOfHDFSPropertyNamesNonHA.contains(propertyName)) { + return false; + } + } + + return true; } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java index 8b45c4110d6..760d90f89c0 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintResourceProvider.java @@ -59,10 +59,12 @@ import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.BlueprintFactory; +import org.apache.ambari.server.topology.BlueprintValidator; +import org.apache.ambari.server.topology.GPLLicenseNotAcceptedException; +import org.apache.ambari.server.topology.InvalidTopologyException; import org.apache.ambari.server.topology.MpackInstance; import org.apache.ambari.server.topology.SecurityConfiguration; import org.apache.ambari.server.topology.SecurityConfigurationFactory; -import org.apache.ambari.server.topology.validators.BlueprintValidator; import org.apache.ambari.server.utils.JsonUtils; import org.apache.ambari.server.utils.SecretReference; import org.slf4j.Logger; @@ -543,8 +545,19 @@ public Void invoke() throws AmbariException { blueprint.getName()); } - if (shouldValidate(requestInfoProps)) { - validator.validate(blueprint); + try { + validator.validateRequiredProperties(blueprint); + } catch (InvalidTopologyException | GPLLicenseNotAcceptedException e) { + throw new IllegalArgumentException("Blueprint configuration validation failed: " + e.getMessage(), e); + } + + String validateTopology = requestInfoProps.get(VALIDATE_TOPOLOGY_PROPERTY_ID); + if (validateTopology == null || ! 
validateTopology.equalsIgnoreCase("false")) { + try { + validator.validateTopology(blueprint); + } catch (InvalidTopologyException e) { + throw new IllegalArgumentException(e.getMessage()); + } } LOG.info("Creating Blueprint, name=" + blueprint.getName()); @@ -561,11 +574,6 @@ public Void invoke() throws AmbariException { }; } - private static boolean shouldValidate(Map requestInfoProps) { - String validateTopology = requestInfoProps.get("validate_topology"); - return validateTopology == null || Boolean.parseBoolean(validateTopology); - } - /** * The structure of blueprints is evolving where multiple resource * structures are to be supported. This class abstracts the population diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java index 4344c2caf52..fdbb034d419 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java @@ -119,9 +119,7 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv protected static final String COMPONENT_CLUSTER_NAME_PROPERTY_ID = "ServiceComponentInfo/cluster_name"; protected static final String COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID = "ServiceComponentInfo/service_group_name"; protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID = "ServiceComponentInfo/service_name"; - protected static final String COMPONENT_COMPONENT_ID_PROPERTY_ID = "ServiceComponentInfo/id"; protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID = "ServiceComponentInfo/component_name"; - protected static final String COMPONENT_COMPONENT_TYPE_PROPERTY_ID = "ServiceComponentInfo/component_type"; protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "host_name"); @@ -143,7 +141,6 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv private static Set propertyIds = Sets.newHashSet( COMPONENT_CLUSTER_NAME_PROPERTY_ID, COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, - COMPONENT_COMPONENT_ID_PROPERTY_ID, COMPONENT_SERVICE_NAME_PROPERTY_ID, COMPONENT_COMPONENT_NAME_PROPERTY_ID, HOST_COMPONENT_HOST_NAME_PROPERTY_ID); @@ -388,6 +385,7 @@ public Set invoke() throws AmbariException { hostLevelParams.put(MYSQL_JDBC_URL, managementController.getMysqljdbcUrl()); hostLevelParams.put(ORACLE_JDBC_URL, managementController.getOjdbcUrl()); hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped()); + hostLevelParams.putAll(managementController.getRcaParameters()); hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled()); hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount()); hostLevelParams.put(GPL_LICENSE_ACCEPTED, configs.getGplLicenseAccepted().toString()); @@ -916,9 +914,7 @@ private ServiceComponentHostRequest getRequest(Map properties) { (String) properties.get(COMPONENT_CLUSTER_NAME_PROPERTY_ID), (String) properties.get(COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID), (String) properties.get(COMPONENT_SERVICE_NAME_PROPERTY_ID), - (Long) properties.get(COMPONENT_COMPONENT_ID_PROPERTY_ID), (String) properties.get(COMPONENT_COMPONENT_NAME_PROPERTY_ID), - (String) properties.get(COMPONENT_COMPONENT_TYPE_PROPERTY_ID), (String) 
properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID), null); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java index baf64e05e75..7a1f517d4e8 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java @@ -533,12 +533,17 @@ private RequestStatusResponse processBlueprintCreate(Map propert ProvisionClusterRequest createClusterRequest; try { - createClusterRequest = - topologyRequestFactory.createProvisionClusterRequest(rawRequestBody, properties, securityConfiguration); + createClusterRequest = topologyRequestFactory.createProvisionClusterRequest(properties, securityConfiguration); } catch (InvalidTopologyTemplateException e) { throw new IllegalArgumentException("Invalid Cluster Creation Template: " + e, e); } + if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.NONE && + createClusterRequest.getBlueprint().getSecurity() != null && createClusterRequest.getBlueprint().getSecurity() + .getType() == SecurityType.KERBEROS) { + throw new IllegalArgumentException("Setting security to NONE is not allowed as security type in blueprint is set to KERBEROS!"); + } + try { return topologyManager.provisionCluster(createClusterRequest); } catch (InvalidTopologyException e) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterSettingResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterSettingResourceProvider.java index af2c11ebd01..936826a4318 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterSettingResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterSettingResourceProvider.java @@ -70,16 +70,13 @@ public class ClusterSettingResourceProvider extends AbstractControllerResourcePr // ----- Property ID constants --------------------------------------------- - public static final String CLUSTER_SETTING_NAME_PROPERTY_ID = "cluster_setting_name"; - public static final String CLUSTER_SETTING_VALUE_PROPERTY_ID = "cluster_setting_value"; - public static final String RESPONSE_KEY = "ClusterSettingInfo"; public static final String ALL_PROPERTIES = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "*"; public static final String CLUSTER_SETTING_CLUSTER_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_id"; public static final String CLUSTER_SETTING_CLUSTER_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_name"; public static final String CLUSTER_SETTING_CLUSTER_SETTING_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_setting_id"; - public static final String CLUSTER_SETTING_CLUSTER_SETTING_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + CLUSTER_SETTING_NAME_PROPERTY_ID; - public static final String CLUSTER_SETTING_CLUSTER_SETTING_VALUE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + CLUSTER_SETTING_VALUE_PROPERTY_ID; + public static final String CLUSTER_SETTING_CLUSTER_SETTING_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_setting_name"; + public static final String CLUSTER_SETTING_CLUSTER_SETTING_VALUE_PROPERTY_ID = 
RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "cluster_setting_value"; private static final Set pkPropertyIds = Sets.newHashSet(new String[]{ diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java index ab11fc71dc8..69c33ed3e35 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java @@ -102,8 +102,6 @@ * Resource provider for cluster stack versions resources. */ @StaticallyInject -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class ClusterStackVersionResourceProvider extends AbstractControllerResourceProvider { private static final Logger LOG = LoggerFactory.getLogger(ClusterStackVersionResourceProvider.class); @@ -700,7 +698,7 @@ ActionExecutionContext getHostVersionInstallCommand(RepositoryVersionEntity repo // Determine repositories for host String osFamily = host.getOsFamily(); - RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(repoVersion.getRepoOsEntities(), host); + RepoOsEntity osEntity = repoVersionHelper.getOSEntityForHost(host, repoVersion); if (CollectionUtils.isEmpty(osEntity.getRepoDefinitionEntities())) { throw new SystemException(String.format("Repositories for os type %s are not defined for version %s of Stack %s.", diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompatibleRepositoryVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompatibleRepositoryVersionResourceProvider.java index fa0d5baf0b0..fa963c696b0 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompatibleRepositoryVersionResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompatibleRepositoryVersionResourceProvider.java @@ -28,7 +28,7 @@ import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.StaticallyInject; -import org.apache.ambari.server.api.resources.OperatingSystemReadOnlyResourceDefinition; +import org.apache.ambari.server.api.resources.OperatingSystemResourceDefinition; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.predicate.AndPredicate; @@ -78,7 +78,7 @@ public class CompatibleRepositoryVersionResourceProvider extends ReadOnlyResourc public static final String REPOSITORY_UPGRADES_SUPPORTED_TYPES_ID = "CompatibleRepositoryVersions/upgrade_types"; public static final String REPOSITORY_VERSION_SERVICES = "CompatibleRepositoryVersions/services"; public static final String REPOSITORY_VERSION_STACK_SERVICES = "CompatibleRepositoryVersions/stack_services"; - public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemReadOnlyResourceDefinition().getPluralName(); + public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemResourceDefinition().getPluralName(); private static final String REPOSITORY_STACK_VALUE = "stack_value"; private static Set pkPropertyIds = Collections.singleton(REPOSITORY_VERSION_ID_PROPERTY_ID); diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java index dbe6e70cc4c..e679e50dbb9 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java @@ -94,9 +94,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide protected static final String COMPONENT_SERVICE_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_id"; protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_name"; protected static final String COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type"; - protected static final String COMPONENT_COMPONENT_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "id"; protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name"; - protected static final String COMPONENT_COMPONENT_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_type"; protected static final String COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name"; protected static final String COMPONENT_STATE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "state"; protected static final String COMPONENT_CATEGORY_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "category"; @@ -122,9 +120,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide COMPONENT_CLUSTER_NAME_PROPERTY_ID, COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, COMPONENT_SERVICE_NAME_PROPERTY_ID, - COMPONENT_COMPONENT_ID_PROPERTY_ID, - COMPONENT_COMPONENT_NAME_PROPERTY_ID, - COMPONENT_COMPONENT_TYPE_PROPERTY_ID); + COMPONENT_COMPONENT_NAME_PROPERTY_ID); /** * The property ids for an servce resource. 
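The ComponentResourceProvider hunks that follow drop the component id and component type property IDs, so a request is built from the five remaining values (plus the optional state, recovery and category fields). An illustrative construction against the five-argument constructor restored earlier in this patch; the literal values are placeholders:

    // Illustrative placeholders only; the constructor shape is the one
    // restored in ServiceComponentRequest above.
    ServiceComponentRequest request = new ServiceComponentRequest(
        "c1",          // clusterName
        "CORE",        // serviceGroupName
        "HDFS",        // serviceName
        "NAMENODE",    // componentName
        "INSTALLED");  // desiredState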
@@ -145,9 +141,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide PROPERTY_IDS.add(COMPONENT_SERVICE_ID_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_SERVICE_NAME_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_SERVICE_TYPE_PROPERTY_ID); - PROPERTY_IDS.add(COMPONENT_COMPONENT_ID_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_COMPONENT_NAME_PROPERTY_ID); - PROPERTY_IDS.add(COMPONENT_COMPONENT_TYPE_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_DISPLAY_NAME_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_STATE_PROPERTY_ID); PROPERTY_IDS.add(COMPONENT_CATEGORY_PROPERTY_ID); @@ -229,9 +223,7 @@ public Set invoke() throws AmbariException, Authorizat resource.setProperty(COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId()); resource.setProperty(COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName()); resource.setProperty(COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType()); - resource.setProperty(COMPONENT_COMPONENT_ID_PROPERTY_ID, response.getComponentId()); resource.setProperty(COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName()); - resource.setProperty(COMPONENT_COMPONENT_TYPE_PROPERTY_ID, response.getComponentType()); resource.setProperty(COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName()); resource.setProperty(COMPONENT_STATE_PROPERTY_ID, response.getDesiredState()); resource.setProperty(COMPONENT_CATEGORY_PROPERTY_ID, response.getCategory()); @@ -278,9 +270,7 @@ public Set invoke() throws AmbariException { setResourceProperty(resource, COMPONENT_SERVICE_ID_PROPERTY_ID, response.getServiceId(), requestedIds); setResourceProperty(resource, COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds); setResourceProperty(resource, COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds); - setResourceProperty(resource, COMPONENT_COMPONENT_ID_PROPERTY_ID, response.getComponentId(), requestedIds); setResourceProperty(resource, COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds); - setResourceProperty(resource, COMPONENT_COMPONENT_TYPE_PROPERTY_ID, response.getComponentType(), requestedIds); setResourceProperty(resource, COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds); setResourceProperty(resource, COMPONENT_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds); setResourceProperty(resource, COMPONENT_CATEGORY_PROPERTY_ID, response.getCategory(), requestedIds); @@ -373,7 +363,6 @@ private ServiceComponentRequest getRequest(Map properties) { (String) properties.get(COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID), (String) properties.get(COMPONENT_SERVICE_NAME_PROPERTY_ID), (String) properties.get(COMPONENT_COMPONENT_NAME_PROPERTY_ID), - (String) properties.get(COMPONENT_COMPONENT_TYPE_PROPERTY_ID), (String) properties.get(COMPONENT_STATE_PROPERTY_ID), (String) properties.get(COMPONENT_RECOVERY_ENABLED_ID), (String) properties.get(COMPONENT_CATEGORY_PROPERTY_ID)); @@ -403,13 +392,6 @@ public Set createComponents(Set createComponents(Set createComponents(Set re final String serviceGroupName = request.getServiceGroupName(); final String serviceName = request.getServiceName(); final String componentName = request.getComponentName(); - final String componentType = request.getComponentType(); LOG.info("Received an updateComponent request: {}", request); @@ -678,7 +659,7 @@ protected RequestStatusResponse updateComponents(Set re State newState = getValidDesiredState(request); if (!
maintenanceStateHelper.isOperationAllowed(reqOpLvl, s)) { - LOG.info("Operations cannot be applied to component name : " + componentName + " with type : " + componentType + LOG.info("Operations cannot be applied to component " + componentName + " because service " + serviceName + " is in the maintenance state of " + s.getMaintenanceState()); continue; @@ -693,8 +674,8 @@ protected RequestStatusResponse updateComponents(Set re boolean newRecoveryEnabled = Boolean.parseBoolean(request.getRecoveryEnabled()); boolean oldRecoveryEnabled = sc.isRecoveryEnabled(); - LOG.info("ComponentName: {}, componentType: {}, oldRecoveryEnabled: {}, newRecoveryEnabled {}", - componentName, componentType, oldRecoveryEnabled, newRecoveryEnabled); + LOG.info("Component: {}, oldRecoveryEnabled: {}, newRecoveryEnabled {}", + componentName, oldRecoveryEnabled, newRecoveryEnabled); if (newRecoveryEnabled != oldRecoveryEnabled) { if (newRecoveryEnabled) { recoveryEnabledComponents.add(sc); @@ -731,7 +712,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + sc.getServiceName() + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", currentDesiredState=" + oldScState + ", newDesiredState=" + newState); @@ -745,7 +725,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + serviceName + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", currentDesiredState=" + oldScState + ", newDesiredState=" + newState); @@ -761,7 +740,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + serviceName + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", hostname=" + sch.getHostName() + ", currentState=" + oldSchState @@ -776,7 +754,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + serviceName + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", hostname=" + sch.getHostName() + ", currentState=" + oldSchState @@ -792,7 +769,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + serviceName + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", hostname=" + sch.getHostName()); @@ -808,7 +784,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + sch.getServiceName() + ", componentName=" + sch.getServiceComponentName() - + ", componentType=" + sch.getServiceComponentType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", hostname=" + sch.getHostName() + ", currentState=" + oldSchState @@ -826,7 +801,6 @@ protected RequestStatusResponse updateComponents(Set re + ", serviceGroupName=" + serviceGroupName + ", serviceName=" + serviceName + ", componentName=" + sc.getName() - + ", componentType=" + sc.getType() + ", recoveryEnabled=" + sc.isRecoveryEnabled() + ", hostname=" + sch.getHostName() + ", currentState=" + oldSchState @@ -940,16 +914,14 @@ private void setServiceNameIfAbsent(final ServiceComponentRequest request, if 
(StringUtils.isEmpty(request.getServiceName())) { String componentName = request.getComponentName(); - String componentType = request.getComponentType(); - String serviceName = getManagementController().findService(cluster, componentType); + String serviceName = getManagementController().findService(cluster, componentName); - debug("Looking up service name for component, componentType={}, serviceName={}", componentType, serviceName); + debug("Looking up service name for component, componentName={}, serviceName={}", componentName, serviceName); if (StringUtils.isEmpty(serviceName)) { - throw new AmbariException("Could not find service for component." - + " componentName=" + request.getComponentName() - + " componentType=" + request.getComponentType() + throw new AmbariException("Could not find service for component" + + ", componentName=" + request.getComponentName() + ", clusterName=" + cluster.getClusterName()); } request.setServiceName(serviceName); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompositeStack.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompositeStack.java index 333a20aebf8..75baf300bcf 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompositeStack.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CompositeStack.java @@ -27,8 +27,6 @@ import java.util.Set; import java.util.stream.Stream; -import javax.annotation.Nonnull; - import org.apache.ambari.server.state.AutoDeployInfo; import org.apache.ambari.server.state.ComponentInfo; import org.apache.ambari.server.state.ConfigHelper; @@ -209,10 +207,10 @@ public String getServiceForComponent(String component) { } @Override - @Nonnull - public Stream> getServicesForComponent(String component) { + public Collection getServicesForComponents(Collection components) { return stacks.stream() - .flatMap(stack -> stack.getServicesForComponent(component)); + .flatMap(m -> m.getServicesForComponents(components).stream()) + .collect(toSet()); } @Override @@ -248,6 +246,15 @@ public Collection getDependenciesForComponent(String component) .collect(toSet()); } + @Override + public String getConditionalServiceForDependency(DependencyInfo dependency) { + return stacks.stream() + .map(m -> m.getConditionalServiceForDependency(dependency)) + .filter(Objects::nonNull) + .findAny() + .orElse(null); + } + @Override public String getExternalComponentConfig(String component) { return stacks.stream() diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java index 498a08f8294..b7f25013980 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java @@ -124,8 +124,6 @@ protected ResourceProvider createResourceProvider(Resource.Type type) { return new StageResourceProvider(managementController); case OperatingSystem: return new OperatingSystemResourceProvider(managementController); - case OperatingSystemReadOnly: - return new OperatingSystemReadOnlyResourceProvider(managementController); case Repository: return new RepositoryResourceProvider(managementController); case Setting: diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java index 151d9a7c3ee..13ae732cec5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java @@ -30,7 +30,7 @@ import java.util.Map; import java.util.Set; -import org.apache.ambari.server.StackAccessException; +import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.api.util.TreeNode; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.AmbariServer; @@ -83,7 +83,7 @@ public ExportBlueprintRequest(TreeNode clusterNode) throws InvalidTopo Collection exportedHostGroups = processHostGroups(clusterNode.getChild("hosts")); createHostGroupInfo(exportedHostGroups); - createBlueprint(exportedHostGroups, createStack(parseStack(clusterResource))); + createBlueprint(exportedHostGroups, parseStack(clusterResource)); } public String getClusterName() { @@ -135,11 +135,11 @@ private void createBlueprint(Collection exportedHostGroups, S componentList.add(new Component(component)); } - hostGroups.add(new HostGroupImpl(exportedHostGroup.getName(), componentList, + hostGroups.add(new HostGroupImpl(exportedHostGroup.getName(), bpName, stack, componentList, exportedHostGroup.getConfiguration(), String.valueOf(exportedHostGroup.getCardinality()))); } ImmutableSet stackIds = ImmutableSet.of(stack.getStackId()); - blueprint = new BlueprintImpl(bpName, hostGroups, stackIds, Collections.emptySet(), configuration, null, null); + blueprint = new BlueprintImpl(bpName, hostGroups, stack, stackIds, Collections.emptySet(), configuration, null, null); } private void createHostGroupInfo(Collection exportedHostGroups) { @@ -153,15 +153,15 @@ private void createHostGroupInfo(Collection exportedHostGroup } - private StackId parseStack(Resource clusterResource) { - return new StackId(String.valueOf(clusterResource.getPropertyValue(ClusterResourceProvider.CLUSTER_VERSION_PROPERTY_ID))); - } + private Stack parseStack(Resource clusterResource) throws InvalidTopologyTemplateException { + String[] stackTokens = String.valueOf(clusterResource.getPropertyValue( + ClusterResourceProvider.CLUSTER_VERSION_PROPERTY_ID)).split("-"); - private Stack createStack(StackId stackId) throws InvalidTopologyTemplateException { try { - return new Stack(stackId, controller.getAmbariMetaInfo()); - } catch (StackAccessException e) { - throw new InvalidTopologyTemplateException(String.format("The specified stack doesn't exist: %s", stackId)); + return new Stack(stackTokens[0], stackTokens[1], controller); + } catch (AmbariException e) { + throw new InvalidTopologyTemplateException(String.format( + "The specified stack doesn't exist: name=%s version=%s", stackTokens[0], stackTokens[1])); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java index ddd180f4e60..8facd21d270 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java @@ -70,7 +70,6 @@ import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableMap; -import 
com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import com.google.inject.Inject; import com.google.inject.Injector; @@ -98,7 +97,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro public static final String HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "service_type"; public static final String HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "id"; public static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_name"; - public static final String HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "component_type"; public static final String HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "display_name"; public static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "host_name"; public static final String HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "public_host_name"; @@ -125,10 +123,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro public static Map keyPropertyIds = ImmutableMap.builder() .put(Resource.Type.Cluster, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID) .put(Resource.Type.Host, HOST_COMPONENT_HOST_NAME_PROPERTY_ID) - .put(Resource.Type.HostComponent, HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID) + .put(Resource.Type.HostComponent, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID) .put(Resource.Type.Component, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID) - .put(Resource.Type.Service, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID) - .put(Resource.Type.ServiceGroup, HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID) .build(); /** @@ -139,9 +135,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, - HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, - HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID, @@ -221,7 +215,6 @@ public Set invoke() throws AmbariException, Author resource.setProperty(HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType()); resource.setProperty(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId()); resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName()); - resource.setProperty(HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID, response.getComponentType()); resource.setProperty(HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName()); resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname()); resource.setProperty(HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID, response.getPublicHostname()); @@ -255,6 +248,19 @@ public Set getResources(Request request, Predicate predicate) return findResources(request, predicate, requests); } + private Set getResourcesForUpdate(Request request, Predicate predicate) + throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { + + final Set requests = new HashSet<>(); + + for (Map propertyMap : getPropertyMaps(predicate)) { + 
requests.add(getRequest(propertyMap)); + } + + return findResources(request, predicate, requests); + } + + private Set findResources(Request request, final Predicate predicate, final Set requests) throws SystemException, NoSuchResourceException, NoSuchParentResourceException { @@ -281,7 +287,6 @@ public Set invoke() throws AmbariException { setResourceProperty(resource, HOST_COMPONENT_SERVICE_TYPE_PROPERTY_ID, response.getServiceType(), requestedIds); setResourceProperty(resource, HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID, response.getHostComponentId(), requestedIds); setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds); - setResourceProperty(resource, HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID, response.getComponentType(), requestedIds); setResourceProperty(resource, HOST_COMPONENT_DISPLAY_NAME_PROPERTY_ID, response.getDisplayName(), requestedIds); setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds); setResourceProperty(resource, HOST_COMPONENT_PUBLIC_HOST_NAME_PROPERTY_ID, response.getPublicHostname(), requestedIds); @@ -352,7 +357,7 @@ public DeleteStatusMetaData invoke() throws AmbariException, AuthorizationExcept notifyDelete(Resource.Type.HostComponent, predicate); for(ServiceComponentHostRequest svcCmpntHostReq : requests) { - deleteStatusMetaData.addDeletedKey("component_id: "+svcCmpntHostReq.getComponentId()); + deleteStatusMetaData.addDeletedKey("component_name: "+svcCmpntHostReq.getComponentName()); } return getRequestStatus(null, null, deleteStatusMetaData); } @@ -672,7 +677,6 @@ protected RequestStageContainer updateHostComponents(RequestStageContainer stage + ", clusterId=" + cluster.getClusterId() + ", serviceName=" + sch.getServiceName() + ", componentName=" + sch.getServiceComponentName() - + ", componentType=" + sch.getServiceComponentType() + ", hostname=" + sch.getHostName() + ", currentState=" + oldSchState + ", newDesiredState=" + newState); @@ -718,22 +722,13 @@ protected Set getPKPropertyIds() { * @return the component request object */ private ServiceComponentHostRequest getRequest(Map properties) { - Long hostComponentId = null; - if (properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID) != null) { - hostComponentId = properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID) instanceof String ? 
- Long.parseLong((String) properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID)) : - (Long) properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID); - } - ServiceComponentHostRequest serviceComponentHostRequest = new ServiceComponentHostRequest( - (String) properties.get(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID), - hostComponentId, - (String) properties.get(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID)); - + ServiceComponentHostRequest serviceComponentHostRequest = new ServiceComponentHostRequest( + (String) properties.get(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID), + (String) properties.get(HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID), + (String) properties.get(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID), + (String) properties.get(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID), + (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID), + (String) properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID)); serviceComponentHostRequest.setState((String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID)); if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) { serviceComponentHostRequest.setStaleConfig( @@ -765,23 +760,13 @@ private ServiceComponentHostRequest getRequest(Map properties) { * @return the component request object */ private ServiceComponentHostRequest changeRequest(Map properties) { - Long hostComponentId = null; - if (properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID) != null) { - hostComponentId = properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID) instanceof String ? 
- Long.parseLong((String) properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID)) : - (Long) properties.get(HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID); - - } ServiceComponentHostRequest serviceComponentHostRequest = new ServiceComponentHostRequest( (String) properties.get(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID), (String) properties.get(HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID), (String) properties.get(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID), - hostComponentId, (String) properties.get(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID), - (String) properties.get(HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID), (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID), (String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID)); - if (properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID) != null) { serviceComponentHostRequest.setDesiredState((String)properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID)); } @@ -828,12 +813,12 @@ private RequestStageContainer doUpdateResources(final RequestStageContainer stag final boolean runSmokeTest = "true".equals(getQueryParameterValue( QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate)); - Set queryIds = ImmutableSet.copyOf(keyPropertyIds.values()); + Set queryIds = Collections.singleton(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID); Request queryRequest = PropertyHelper.getReadRequest(queryIds); // will take care of 404 exception - Set matchingResources = getResources(queryRequest, predicate); + Set matchingResources = getResourcesForUpdate(queryRequest, predicate); for (Resource queryResource : matchingResources) { //todo: predicate evaluation was removed for BUG-28737 and the removal of this breaks @@ -982,13 +967,12 @@ private void doDirectTransitions(Map directTransiti * @param request the request to log */ private void logRequestInfo(String msg, ServiceComponentHostRequest request) { - LOG.info("{}, clusterName={}, serviceGroupName={}, serviceName={}, componentName={}, componentType={}, hostname={}, request={}", + LOG.info("{}, clusterName={}, serviceGroupName={}, serviceName={}, componentName={}, hostname={}, request={}", msg, request.getClusterName(), request.getServiceGroupName(), request.getServiceName(), request.getComponentName(), - request.getComponentType(), request.getHostname(), request); } @@ -1008,7 +992,6 @@ private String getServiceComponentRequestInfoLogMessage(String msg, ServiceCompo .append(", serviceGroupName=").append(request.getServiceGroupName()) .append(", serviceName=").append(request.getServiceName()) .append(", componentName=").append(request.getComponentName()) - .append(", componentType=").append(request.getComponentType()) .append(", hostname=").append(request.getHostname()) .append(", currentState=").append(oldState == null ? "null" : oldState) .append(", newDesiredState=").append(newDesiredState == null ? 
"null" : newDesiredState); @@ -1048,14 +1031,10 @@ private void validateServiceComponentHostRequest(ServiceComponentHostRequest req || request.getClusterName().isEmpty() || request.getComponentName() == null || request.getComponentName().isEmpty() - || request.getServiceName() == null - || request.getServiceName().isEmpty() - || request.getServiceGroupName() == null - || request.getServiceGroupName().isEmpty() || request.getHostname() == null || request.getHostname().isEmpty()) { throw new IllegalArgumentException("Invalid arguments" - + ", cluster name, component name, service name, service group name and host name should be" + + ", cluster name, component name and host name should be" + " provided"); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java index 18122e9941c..79f4233ebce 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java @@ -155,6 +155,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider { public static Map keyPropertyIds = ImmutableMap.builder() .put(Resource.Type.Host, HOST_HOST_NAME_PROPERTY_ID) .put(Resource.Type.Cluster, HOST_CLUSTER_NAME_PROPERTY_ID) + .put(Resource.Type.HostComponent, HOST_OS_TYPE_PROPERTY_ID) .build(); /** @@ -957,9 +958,7 @@ private void processDeleteHostRequests(List requests, Clusters clu ServiceComponentHostRequest schr = new ServiceComponentHostRequest(cluster.getClusterName(), sch.getServiceGroupName(), sch.getServiceName(), - sch.getHostComponentId(), sch.getServiceComponentName(), - sch.getServiceComponentType(), sch.getHostName(), null); schrs.add(schr); @@ -1087,9 +1086,7 @@ public static String getHostNameFromProperties(Map properties) { private RequestStatusResponse submitHostRequests(Request request) throws SystemException { ScaleClusterRequest requestRequest; try { - requestRequest = new ScaleClusterRequest( - request.getRequestInfoProperties().get(Request.REQUEST_INFO_BODY_PROPERTY), - request.getProperties()); + requestRequest = new ScaleClusterRequest(request.getProperties()); } catch (InvalidTopologyTemplateException e) { throw new IllegalArgumentException("Invalid Add Hosts Template: " + e, e); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStatusHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStatusHelper.java index 2cad5f43e73..9c38bff8c10 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStatusHelper.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStatusHelper.java @@ -41,8 +41,8 @@ public class HostStatusHelper { LoggerFactory.getLogger(HostStatusHelper.class); public static boolean isHostComponentLive(AmbariManagementController managementController, - String clusterName, String hostName, String serviceName, - Long componentId, String componentName, String componentType) { + String clusterName, String hostName, + String serviceName, String componentName) { if (clusterName == null) { return false; } @@ -54,8 +54,8 @@ public static boolean isHostComponentLive(AmbariManagementController managementC Cluster cluster = clusters.getCluster(clusterName); Service s = cluster.getService(serviceName); 
ServiceComponentHostRequest componentRequest = - new ServiceComponentHostRequest(clusterName, s.getServiceGroupName(), serviceName, componentId, componentName, componentType, - hostName, null); + new ServiceComponentHostRequest(clusterName, s.getServiceGroupName(), serviceName, componentName, hostName, + null); Set hostComponents = managementController.getHostComponents(Collections.singleton(componentRequest)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/MpackResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/MpackResourceProvider.java index ff0cbde0748..a867d11602d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/MpackResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/MpackResourceProvider.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -54,6 +55,7 @@ import org.apache.ambari.server.registry.Registry; import org.apache.ambari.server.registry.RegistryMpack; import org.apache.ambari.server.registry.RegistryMpackVersion; +import org.apache.ambari.server.state.Module; import org.apache.ambari.server.state.StackId; import org.apache.commons.lang.Validate; @@ -76,8 +78,8 @@ public class MpackResourceProvider extends AbstractControllerResourceProvider { public static final String MPACK_URI = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "mpack_uri"; public static final String MODULES = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "modules"; public static final String STACK_NAME_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stack_name"; - public static final String STACK_VERSION_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stack_version"; - public static final String OS_PROPERTY_ID = RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "operating_systems"; + public static final String STACK_VERSION_PROPERTY_ID = + RESPONSE_KEY + PropertyHelper.EXTERNAL_PATH_SEP + "stack_version"; private static Set pkPropertyIds = new HashSet<>( Arrays.asList(MPACK_RESOURCE_ID, STACK_NAME_PROPERTY_ID, STACK_VERSION_PROPERTY_ID)); @@ -113,7 +115,6 @@ public class MpackResourceProvider extends AbstractControllerResourceProvider { PROPERTY_IDS.add(MODULES); PROPERTY_IDS.add(STACK_NAME_PROPERTY_ID); PROPERTY_IDS.add(STACK_VERSION_PROPERTY_ID); - PROPERTY_IDS.add(OS_PROPERTY_ID); // keys KEY_PROPERTY_IDS.put(Resource.Type.Mpack, MPACK_RESOURCE_ID); @@ -158,15 +159,12 @@ public RequestStatus createResourcesAuthorized(final Request request) associatedResources.add(resource); return getRequestStatus(null, associatedResources); } - } - catch (ConnectException e) { - throw new SystemException("The Mpack Uri: " + mpackRequest.getMpackUri() + " is not valid. Please try again", e); - } - catch (IOException e) { - throw new SystemException("I/O exception occured during installing mpack: " + mpackRequest.getMpackUri(), e); - } - catch (BodyParseException e) { - throw new SystemException("Invalid mpack registration request", e); + } catch (IOException e) { + if (e instanceof ConnectException) + throw new SystemException("The Mpack Uri : " + mpackRequest.getMpackUri() + " is not valid. 
Please try again"); + e.printStackTrace(); + } catch (BodyParseException e1) { + e1.printStackTrace(); } return null; } @@ -248,12 +246,11 @@ public Set getResources(Request request, Predicate predicate) Long mpackId = null; if (predicate == null) { // Fetch all mpacks - Set responses = getManagementController().getMpacks(); + Set responses = (HashSet)getManagementController().getMpacks(); if (null == responses) { responses = Collections.emptySet(); } - - for (MpackResponse response : responses) { + for (MpackResponse response : responses){ Resource resource = new ResourceImpl(Resource.Type.Mpack); resource.setProperty(MPACK_RESOURCE_ID, response.getId()); resource.setProperty(MPACK_ID, response.getMpackId()); @@ -262,52 +259,60 @@ public Set getResources(Request request, Predicate predicate) resource.setProperty(MPACK_URI, response.getMpackUri()); resource.setProperty(MPACK_DESCRIPTION, response.getDescription()); resource.setProperty(REGISTRY_ID, response.getRegistryId()); - results.add(resource); } } else { // Fetch a particular mpack based on id Map propertyMap = new HashMap<>(PredicateHelper.getProperties(predicate)); - if (propertyMap.containsKey(STACK_NAME_PROPERTY_ID) - && propertyMap.containsKey(STACK_VERSION_PROPERTY_ID)) { + if (propertyMap.containsKey(STACK_NAME_PROPERTY_ID) && propertyMap.containsKey(STACK_VERSION_PROPERTY_ID)) { String stackName = (String) propertyMap.get(STACK_NAME_PROPERTY_ID); String stackVersion = (String) propertyMap.get(STACK_VERSION_PROPERTY_ID); StackEntity stackEntity = stackDAO.find(stackName, stackVersion); mpackId = stackEntity.getMpackId(); - } else if (propertyMap.containsKey(MPACK_RESOURCE_ID)) { - Object objMpackId = propertyMap.get(MPACK_RESOURCE_ID); - if (objMpackId != null) { - mpackId = Long.valueOf((String) objMpackId); + if (mpackId != null) { + MpackResponse response = getManagementController().getMpack(mpackId); + Resource resource = new ResourceImpl(Resource.Type.Mpack); + if (null != response) { + resource.setProperty(MPACK_RESOURCE_ID, response.getId()); + resource.setProperty(MPACK_ID, response.getMpackId()); + resource.setProperty(MPACK_NAME, response.getMpackName()); + resource.setProperty(MPACK_VERSION, response.getMpackVersion()); + resource.setProperty(MPACK_URI, response.getMpackUri()); + resource.setProperty(MPACK_DESCRIPTION, response.getDescription()); + resource.setProperty(REGISTRY_ID, response.getRegistryId()); + resource.setProperty(STACK_NAME_PROPERTY_ID, stackName); + resource.setProperty(STACK_VERSION_PROPERTY_ID, stackVersion); + results.add(resource); + } } + return results; } - if (null == mpackId) { - throw new IllegalArgumentException( - "Either the management pack ID or the stack name and version are required when searching"); - } - - MpackResponse response = getManagementController().getMpack(mpackId); - Resource resource = new ResourceImpl(Resource.Type.Mpack); - if (null != response) { - resource.setProperty(MPACK_RESOURCE_ID, response.getId()); - resource.setProperty(MPACK_ID, response.getMpackId()); - resource.setProperty(MPACK_NAME, response.getMpackName()); - resource.setProperty(MPACK_VERSION, response.getMpackVersion()); - resource.setProperty(MPACK_URI, response.getMpackUri()); - resource.setProperty(MPACK_DESCRIPTION, response.getDescription()); - resource.setProperty(REGISTRY_ID, response.getRegistryId()); + if (propertyMap.containsKey(MPACK_RESOURCE_ID)) { + Object objMpackId = propertyMap.get(MPACK_RESOURCE_ID); + if (objMpackId != null) + mpackId = Long.valueOf((String) objMpackId); - StackId 
stackId = new StackId(response.getStackId()); - resource.setProperty(STACK_NAME_PROPERTY_ID, stackId.getStackName()); - resource.setProperty(STACK_VERSION_PROPERTY_ID, stackId.getStackVersion()); - results.add(resource); + MpackResponse response = getManagementController().getMpack(mpackId); + Resource resource = new ResourceImpl(Resource.Type.Mpack); + if (null != response) { + resource.setProperty(MPACK_RESOURCE_ID, response.getId()); + resource.setProperty(MPACK_ID, response.getMpackId()); + resource.setProperty(MPACK_NAME, response.getMpackName()); + resource.setProperty(MPACK_VERSION, response.getMpackVersion()); + resource.setProperty(MPACK_URI, response.getMpackUri()); + resource.setProperty(MPACK_DESCRIPTION, response.getDescription()); + resource.setProperty(REGISTRY_ID, response.getRegistryId()); + List modules = getManagementController().getModules(response.getId()); + resource.setProperty(MODULES, modules); + results.add(resource); + } } - if (results.isEmpty()) { - throw new NoSuchResourceException("The requested resource doesn't exist: " + predicate); + throw new NoSuchResourceException( + "The requested resource doesn't exist: " + predicate); } } - return results; } @@ -342,8 +347,8 @@ protected RequestStatus deleteResourcesAuthorized(final Request request, Predica @Override public DeleteStatusMetaData invoke() throws AmbariException { if (stackEntity != null) { - repositoryVersionDAO.removeByStack(new StackId( - stackEntity.getStackName() + "-" + stackEntity.getStackVersion())); + repositoryVersionDAO + .removeByStack(new StackId(stackEntity.getStackName() + "-" + stackEntity.getStackVersion())); stackDAO.removeByMpack(mpackId); notifyDelete(Resource.Type.Stack, predicate); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemReadOnlyResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemReadOnlyResourceProvider.java deleted file mode 100644 index 9b947f25d41..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemReadOnlyResourceProvider.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ambari.server.controller.internal; - -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.controller.AmbariManagementController; -import org.apache.ambari.server.controller.OperatingSystemRequest; -import org.apache.ambari.server.controller.OperatingSystemResponse; -import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; -import org.apache.ambari.server.controller.spi.NoSuchResourceException; -import org.apache.ambari.server.controller.spi.Predicate; -import org.apache.ambari.server.controller.spi.Request; -import org.apache.ambari.server.controller.spi.Resource; -import org.apache.ambari.server.controller.spi.Resource.Type; -import org.apache.ambari.server.controller.spi.SystemException; -import org.apache.ambari.server.controller.spi.UnsupportedPropertyException; -import org.apache.ambari.server.controller.utilities.PropertyHelper; - -import com.google.common.collect.Sets; - -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) -public class OperatingSystemReadOnlyResourceProvider extends ReadOnlyResourceProvider { - - public static final String OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "stack_name"); - public static final String OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "stack_version"); - public static final String OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "os_type"); - public static final String OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "repository_version_id"); - public static final String OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "version_definition_id"); - public static final String OPERATING_SYSTEM_AMBARI_MANAGED_REPOS = "OperatingSystems/ambari_managed_repositories"; - - private static Set pkPropertyIds = Sets.newHashSet( - OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, - OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, - OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID); - - public static Set propertyIds = Sets.newHashSet( - OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, - OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, - OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID, - OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID, - OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID, - OPERATING_SYSTEM_AMBARI_MANAGED_REPOS); - - public static Map keyPropertyIds = new HashMap() { - { - put(Resource.Type.OperatingSystemReadOnly, OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); - put(Resource.Type.Stack, OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID); - put(Resource.Type.StackVersion, OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID); - put(Resource.Type.RepositoryVersion, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID); - put(Resource.Type.CompatibleRepositoryVersion, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID); - put(Resource.Type.VersionDefinition, OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID); - } - }; - - protected OperatingSystemReadOnlyResourceProvider(AmbariManagementController managementController) { - super(Resource.Type.OperatingSystemReadOnly, propertyIds, keyPropertyIds, managementController); - } - - @Override - public 
Set getResources(Request request, Predicate predicate) - throws SystemException, UnsupportedPropertyException, - NoSuchResourceException, NoSuchParentResourceException { - - final Set requests = new HashSet<>(); - - if (predicate == null) { - requests.add(getRequest(Collections.emptyMap())); - } else { - for (Map propertyMap : getPropertyMaps(predicate)) { - requests.add(getRequest(propertyMap)); - } - } - - Set requestedIds = getRequestPropertyIds(request, predicate); - - Set responses = getResources(new Command>() { - @Override - public Set invoke() throws AmbariException { - return getManagementController().getOperatingSystems(requests); - } - }); - - Set resources = new HashSet<>(); - - for (OperatingSystemResponse response : responses) { - Resource resource = new ResourceImpl(Resource.Type.OperatingSystemReadOnly); - - setResourceProperty(resource, OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, - response.getStackName(), requestedIds); - - setResourceProperty(resource, OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID, - response.getStackVersion(), requestedIds); - - setResourceProperty(resource, OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, - response.getOsType(), requestedIds); - - setResourceProperty(resource, OPERATING_SYSTEM_AMBARI_MANAGED_REPOS, response.isAmbariManagedRepos(), - requestedIds); - - if (response.getRepositoryVersionId() != null) { - setResourceProperty(resource, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID, - response.getRepositoryVersionId(), requestedIds); - } - - if (response.getVersionDefinitionId() != null) { - setResourceProperty(resource, OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID, - response.getVersionDefinitionId(), requestedIds); - } - - resources.add(resource); - } - - return resources; - } - - private OperatingSystemRequest getRequest(Map properties) { - final OperatingSystemRequest request = new OperatingSystemRequest( - (String) properties.get(OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID), - (String) properties.get(OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID), - (String) properties.get(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID)); - - if (properties.containsKey(OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID)) { - request.setRepositoryVersionId(Long.parseLong(properties.get(OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID).toString())); - } - - if (properties.containsKey(OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID)) { - request.setVersionDefinitionId(properties.get(OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID).toString()); - } - - return request; - } - - @Override - protected Set getPKPropertyIds() { - return pkPropertyIds; - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemResourceProvider.java index 82e23ceee5b..2310a28c8a8 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OperatingSystemResourceProvider.java @@ -1,4 +1,5 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. 
The ASF licenses this file @@ -17,335 +18,142 @@ package org.apache.ambari.server.controller.internal; -import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; +import java.util.HashSet; import java.util.Map; import java.util.Set; import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.StaticallyInject; import org.apache.ambari.server.controller.AmbariManagementController; +import org.apache.ambari.server.controller.OperatingSystemRequest; +import org.apache.ambari.server.controller.OperatingSystemResponse; import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; import org.apache.ambari.server.controller.spi.NoSuchResourceException; import org.apache.ambari.server.controller.spi.Predicate; import org.apache.ambari.server.controller.spi.Request; -import org.apache.ambari.server.controller.spi.RequestStatus; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.spi.Resource.Type; -import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException; import org.apache.ambari.server.controller.spi.SystemException; import org.apache.ambari.server.controller.spi.UnsupportedPropertyException; import org.apache.ambari.server.controller.utilities.PropertyHelper; -import org.apache.ambari.server.orm.dao.MpackDAO; -import org.apache.ambari.server.orm.entities.MpackEntity; -import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; -import org.apache.ambari.server.orm.entities.RepoOsEntity; -import org.apache.ambari.server.security.authorization.AuthorizationException; -import org.apache.ambari.server.state.RepositoryInfo; -import org.apache.commons.lang.StringUtils; import com.google.common.collect.Sets; -import com.google.gson.Gson; -import com.google.gson.reflect.TypeToken; -import com.google.inject.Inject; -/** - * The {@link OperatingSystemResourceProvider} is used to provide CRUD - * capabilities for repositories based on an operating system. 
- */ -@StaticallyInject -public class OperatingSystemResourceProvider extends AbstractControllerResourceProvider { - - public static final String OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "os_type"); - public static final String OPERATING_SYSTEM_IS_AMBARI_MANAGED = PropertyHelper.getPropertyId("OperatingSystems","is_ambari_managed"); +public class OperatingSystemResourceProvider extends ReadOnlyResourceProvider { - public static final String OPERATING_SYSTEM_REPOS = PropertyHelper.getPropertyId("OperatingSystems","repositories"); - public static final String OPERATING_SYSTEM_MPACK_ID = PropertyHelper.getPropertyId("OperatingSystems", "mpack_id"); + public static final String OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "stack_name"); + public static final String OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "stack_version"); + public static final String OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "os_type"); + public static final String OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "repository_version_id"); + public static final String OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID = PropertyHelper.getPropertyId("OperatingSystems", "version_definition_id"); + public static final String OPERATING_SYSTEM_AMBARI_MANAGED_REPOS = "OperatingSystems/ambari_managed_repositories"; - private static Set pkPropertyIds = Sets.newHashSet(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); + private static Set pkPropertyIds = Sets.newHashSet( + OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, + OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, + OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID); public static Set propertyIds = Sets.newHashSet( - OPERATING_SYSTEM_MPACK_ID, OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, - OPERATING_SYSTEM_IS_AMBARI_MANAGED, OPERATING_SYSTEM_REPOS); + OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, + OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID, + OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID, + OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID, + OPERATING_SYSTEM_AMBARI_MANAGED_REPOS); public static Map keyPropertyIds = new HashMap() { { - put(Resource.Type.Mpack, OPERATING_SYSTEM_MPACK_ID); put(Resource.Type.OperatingSystem, OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); + put(Resource.Type.Stack, OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID); + put(Resource.Type.StackVersion, OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID); + put(Resource.Type.RepositoryVersion, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID); + put(Resource.Type.CompatibleRepositoryVersion, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID); + put(Resource.Type.VersionDefinition, OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID); } }; - /** - * Used to update - */ - @Inject - private static MpackDAO s_mpackDAO; - - /** - * Used to deserialize the repository JSON into an object. 
- */ - @Inject - private static Gson s_gson; - protected OperatingSystemResourceProvider(AmbariManagementController managementController) { super(Resource.Type.OperatingSystem, propertyIds, keyPropertyIds, managementController); } - /** - * {@inheritDoc} - */ - @Override - protected Set getPKPropertyIds() { - return pkPropertyIds; - } - - /** - * {@inheritDoc} - */ @Override public Set getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { - Set requestPropertyIds = getRequestPropertyIds(request, predicate); + final Set requests = new HashSet<>(); - // use a collection which preserves order since JPA sorts the results - Set results = new LinkedHashSet<>(); - - for (Map propertyMap : getPropertyMaps(predicate)) { - String mpackIdString = (String) propertyMap.get(OPERATING_SYSTEM_MPACK_ID); - Long mpackId = Long.valueOf(mpackIdString); - MpackEntity mpackEntity = s_mpackDAO.findById(mpackId); - List repositoryOperatingSystems = mpackEntity.getRepositoryOperatingSystems(); - for (RepoOsEntity repoOsEntity : repositoryOperatingSystems) { - Resource resource = toResource(repoOsEntity, requestPropertyIds); - results.add(resource); + if (predicate == null) { + requests.add(getRequest(Collections.emptyMap())); + } else { + for (Map propertyMap : getPropertyMaps(predicate)) { + requests.add(getRequest(propertyMap)); } } - return results; - } - - /** - * {@inheritDoc} - */ - @Override - public RequestStatus deleteResources(Request request, Predicate predicate) throws SystemException, - UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { - - for (Map propertyMap : getPropertyMaps(predicate)) { - String mpackIdString = (String) propertyMap.get(OPERATING_SYSTEM_MPACK_ID); - Long mpackId = Long.valueOf(mpackIdString); + Set requestedIds = getRequestPropertyIds(request, predicate); - if (StringUtils.isBlank(mpackIdString)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_MPACK_ID)); - } - - String operatingSystem = (String)propertyMap.get(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); - if (StringUtils.isBlank(operatingSystem)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID)); - } - - MpackEntity mpackEntity = s_mpackDAO.findById(mpackId); - List repositoryOperatingSystems = mpackEntity.getRepositoryOperatingSystems(); - Iterator iterator = repositoryOperatingSystems.iterator(); - while (iterator.hasNext()) { - RepoOsEntity repoOsEntity = iterator.next(); - if (StringUtils.equals(operatingSystem, repoOsEntity.getFamily())) { - iterator.remove(); - } - } - - mpackEntity = s_mpackDAO.merge(mpackEntity); - } - - notifyDelete(Resource.Type.OperatingSystem, predicate); - return getRequestStatus(null); - } - - /** - * {@inheritDoc} - */ - @Override - public RequestStatus createResources(final Request request) throws SystemException, - UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException { - - createResources(new Command() { + Set responses = getResources(new Command>() { @Override - public Void invoke() throws AmbariException, AuthorizationException { - createOperatingSystem(request.getProperties()); - return null; + public Set invoke() throws AmbariException { + return getManagementController().getOperatingSystems(requests); } }); - notifyCreate(Resource.Type.OperatingSystem, request); - - return 
getRequestStatus(null); - } - private void createOperatingSystem(Set> requestMaps) - throws AmbariException, AuthorizationException { - for (Map requestMap : requestMaps) { - String mpackIdString = (String) requestMap.get(OPERATING_SYSTEM_MPACK_ID); - Long mpackId = Long.valueOf(mpackIdString); + Set resources = new HashSet<>(); - if (StringUtils.isBlank(mpackIdString)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_MPACK_ID)); - } - - String operatingSystem = (String) requestMap.get(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); - if (StringUtils.isBlank(operatingSystem)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID)); - } - - MpackEntity mpackEntity = s_mpackDAO.findById(mpackId); - if (null == mpackEntity) { - throw new IllegalArgumentException( - String.format("The mpack with ID %s was not found", mpackId)); - } + for (OperatingSystemResponse response : responses) { + Resource resource = new ResourceImpl(Resource.Type.OperatingSystem); - RepoOsEntity repositoryOsEntity = new RepoOsEntity(); - repositoryOsEntity.setFamily(operatingSystem); - repositoryOsEntity.setMpackEntity(mpackEntity); - populateEntity(repositoryOsEntity, requestMap); + setResourceProperty(resource, OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, + response.getStackName(), requestedIds); - mpackEntity.getRepositoryOperatingSystems().add(repositoryOsEntity); - mpackEntity = s_mpackDAO.merge(mpackEntity); + setResourceProperty(resource, OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID, + response.getStackVersion(), requestedIds); - s_mpackDAO.refresh(mpackEntity); - } - } + setResourceProperty(resource, OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, + response.getOsType(), requestedIds); - /** - * {@inheritDoc} - */ - @Override - public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, - UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { + setResourceProperty(resource, OPERATING_SYSTEM_AMBARI_MANAGED_REPOS, response.isAmbariManagedRepos(), + requestedIds); - for (Map requestPropMap : request.getProperties()) { - for (Map propertyMap : getPropertyMaps(requestPropMap, predicate)) { - String mpackIdString = (String) propertyMap.get(OPERATING_SYSTEM_MPACK_ID); - Long mpackId = Long.valueOf(mpackIdString); - - if (StringUtils.isBlank(mpackIdString)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_MPACK_ID)); - } - - String operatingSystem = (String) propertyMap.get(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID); - if (StringUtils.isBlank(operatingSystem)) { - throw new IllegalArgumentException( - String.format("The property %s is required", OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID)); - } - - MpackEntity mpackEntity = s_mpackDAO.findById(mpackId); - if (null == mpackEntity) { - throw new IllegalArgumentException( - String.format("The mpack with ID %s was not found", mpackId)); - } - - List repositoryOperatingSystems = mpackEntity.getRepositoryOperatingSystems(); - for (RepoOsEntity repoOsEntity : repositoryOperatingSystems) { - if (StringUtils.equals(operatingSystem, repoOsEntity.getFamily())) { - try { - populateEntity(repoOsEntity, propertyMap); - } catch( AmbariException ambariException ) { - throw new SystemException(ambariException.getMessage(), ambariException); - } - } - } - - mpackEntity = s_mpackDAO.merge(mpackEntity); + if (response.getRepositoryVersionId() != null) { + 
setResourceProperty(resource, OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID, + response.getRepositoryVersionId(), requestedIds); } - } - notifyUpdate(Resource.Type.OperatingSystem, request, predicate); - return getRequestStatus(null); - } - - /** - * Convert the repository entity to a response resource for serialization. - * - * @param repositoryOsEntity - * the operating system result to serialize. - * @param requestedIds - * the list of requested IDs to use when setting optional properties. - * @return the resource to be serialized in the response. - */ - private Resource toResource(RepoOsEntity repositoryOsEntity, Set requestedIds) { - Resource resource = new ResourceImpl(Resource.Type.OperatingSystem); - - resource.setProperty(OPERATING_SYSTEM_MPACK_ID, repositoryOsEntity.getMpackId()); - resource.setProperty(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, repositoryOsEntity.getFamily()); - resource.setProperty(OPERATING_SYSTEM_IS_AMBARI_MANAGED, repositoryOsEntity.isAmbariManaged()); - - Set repositories = new LinkedHashSet<>(); - for (RepoDefinitionEntity repoDefinitionEntity : repositoryOsEntity.getRepoDefinitionEntities()) { - RepositoryInfo repositoryInfo = new RepositoryInfo(); - repositoryInfo.setAmbariManagedRepositories(repositoryOsEntity.isAmbariManaged()); - repositoryInfo.setBaseUrl(repoDefinitionEntity.getBaseUrl()); - repositoryInfo.setComponents(repoDefinitionEntity.getComponents()); - repositoryInfo.setDistribution(repoDefinitionEntity.getDistribution()); - repositoryInfo.setMirrorsList(repoDefinitionEntity.getMirrors()); - repositoryInfo.setOsType(repositoryOsEntity.getFamily()); - repositoryInfo.setTags(repoDefinitionEntity.getTags()); - repositoryInfo.setUnique(repoDefinitionEntity.isUnique()); - repositoryInfo.setRepoId(repoDefinitionEntity.getRepoID()); - repositoryInfo.setRepoName(repoDefinitionEntity.getRepoName()); + if (response.getVersionDefinitionId() != null) { + setResourceProperty(resource, OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID, + response.getVersionDefinitionId(), requestedIds); + } - repositories.add(repositoryInfo); + resources.add(resource); } - resource.setProperty(OPERATING_SYSTEM_REPOS, repositories); - - return resource; + return resources; }
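With its create, update, and delete paths removed, the provider above now serves GETs by translating each predicate property map into an OperatingSystemRequest and delegating to the management controller. A rough sketch of how a single query flows through the new code, using the getRequest helper that follows (the property values are placeholders, and the provider's fields and constants are assumed to be in scope):

```java
// GET .../operating_systems/redhat7 arrives as a property map roughly like this:
Map<String, Object> propertyMap = new HashMap<>();
propertyMap.put(OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID, "HDP");
propertyMap.put(OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID, "2.6");
propertyMap.put(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, "redhat7");

// getResources() maps it to a typed request and asks the controller to answer:
OperatingSystemRequest request = getRequest(propertyMap);
Set<OperatingSystemResponse> responses =
    getManagementController().getOperatingSystems(Collections.singleton(request));
```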
- /** - * Merges the map of properties into the specified entity. If the entity is - * being created, an {@link IllegalArgumentException} is thrown when a - * required property is absent. When updating, missing properties are assumed - * to not have changed. - * - * @param entity - * the entity to merge the properties into (not {@code null}). - * @param requestMap - * the map of properties (not {@code null}). - * @throws AmbariException - */ - private void populateEntity(RepoOsEntity entity, Map requestMap) - throws AmbariException, AuthorizationException { + private OperatingSystemRequest getRequest(Map properties) { + final OperatingSystemRequest request = new OperatingSystemRequest( + (String) properties.get(OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID), + (String) properties.get(OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID), + (String) properties.get(OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID)); - if (requestMap.containsKey(OPERATING_SYSTEM_IS_AMBARI_MANAGED)) { - String isAmbariManagedString = (String) requestMap.get(OPERATING_SYSTEM_IS_AMBARI_MANAGED); - entity.setAmbariManaged(Boolean.valueOf(isAmbariManagedString)); + if (properties.containsKey(OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID)) { + request.setRepositoryVersionId(Long.parseLong(properties.get(OPERATING_SYSTEM_REPOSITORY_VERSION_ID_PROPERTY_ID).toString())); } - if (requestMap.containsKey(OPERATING_SYSTEM_REPOS)) { - java.lang.reflect.Type listType = new TypeToken<List<RepositoryInfo>>(){}.getType(); - - @SuppressWarnings("unchecked") - Set<Map<String, Object>> repoMaps = (Set<Map<String, Object>>) requestMap.get( - OPERATING_SYSTEM_REPOS); + if (properties.containsKey(OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID)) { + request.setVersionDefinitionId(properties.get(OPERATING_SYSTEM_VERSION_DEFINITION_ID_PROPERTY_ID).toString()); + } - String json = s_gson.toJson(repoMaps); - List repositories = s_gson.fromJson(json, listType); - List repoDefinitionEntities = entity.getRepoDefinitionEntities(); - repoDefinitionEntities.clear(); + return request; + } - for (RepositoryInfo repositoryInfo : repositories) { - RepoDefinitionEntity repoDefinitionEntity = RepoDefinitionEntity.from(repositoryInfo); - repoDefinitionEntity.setRepoOs(entity); - repoDefinitionEntities.add(repoDefinitionEntity); - } - } + @Override + protected Set getPKPropertyIds() { + return pkPropertyIds; } + } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java index 7a5086d6b48..115973c9ec5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java @@ -18,9 +18,8 @@ package org.apache.ambari.server.controller.internal; import static java.util.stream.Collectors.toList; -import static java.util.stream.Collectors.toSet; -import static org.apache.ambari.server.topology.TopologyManager.KDC_ADMIN_CREDENTIAL; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; @@ -30,11 +29,8 @@ import org.apache.ambari.server.api.predicate.InvalidQueryException; import org.apache.ambari.server.security.encryption.CredentialStoreType; import org.apache.ambari.server.stack.NoSuchStackException; -import org.apache.ambari.server.state.SecurityType; -import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfileBuilder; import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfileEvaluationException; -import org.apache.ambari.server.topology.BlueprintFactory; import org.apache.ambari.server.topology.ConfigRecommendationStrategy; import org.apache.ambari.server.topology.Configuration; import org.apache.ambari.server.topology.ConfigurationFactory; @@ -44,11 +40,13 @@ import
org.apache.ambari.server.topology.ManagementPackMapping; import org.apache.ambari.server.topology.MpackInstance; import org.apache.ambari.server.topology.NoSuchBlueprintException; -import org.apache.ambari.server.topology.ProvisionRequest; import org.apache.ambari.server.topology.SecurityConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Enums; import com.google.common.base.Optional; import com.google.common.base.Strings; @@ -57,7 +55,7 @@ * Request for provisioning a cluster. */ @SuppressWarnings("unchecked") -public class ProvisionClusterRequest extends BaseClusterRequest implements ProvisionRequest { +public class ProvisionClusterRequest extends BaseClusterRequest { /** * host groups property name */ @@ -131,6 +129,8 @@ public class ProvisionClusterRequest extends BaseClusterRequest implements Provi public static final String MANAGEMENT_PACK_MAPPINGS_PROPERTY = "management_pack_mappings"; + public static final String MPACK_INSTANCES_PROPERTY = BlueprintResourceProvider.MPACK_INSTANCES_PROPERTY_ID; + public static final String MPACK_INSTANCE_PROPERTY = "mpack_instance"; public static final String COMPONENT_NAME_PROPERTY = "component_name"; @@ -163,8 +163,7 @@ public class ProvisionClusterRequest extends BaseClusterRequest implements Provi private final String quickLinksProfileJson; - private final Collection mpackInstances; - private final Set stackIds; + private Collection mpackInstances; private final static Logger LOG = LoggerFactory.getLogger(ProvisionClusterRequest.class); @@ -174,10 +173,8 @@ public class ProvisionClusterRequest extends BaseClusterRequest implements Provi * @param properties request properties * @param securityConfiguration security config related properties */ - public ProvisionClusterRequest(String rawRequestBody, Map properties, SecurityConfiguration securityConfiguration) throws + public ProvisionClusterRequest(Map properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException { - this.rawRequestBody = rawRequestBody; - setClusterName(String.valueOf(properties.get( ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID))); @@ -201,24 +198,22 @@ public ProvisionClusterRequest(String rawRequestBody, Map proper throw new InvalidTopologyTemplateException("The specified blueprint doesn't exist: " + e, e); } - Configuration configuration = configurationFactory.getConfiguration((Collection<Map<String, String>>) properties.get(CONFIGURATIONS_PROPERTY)); + this.securityConfiguration = securityConfiguration; + + Configuration configuration = configurationFactory.getConfiguration( + (Collection<Map<String, String>>) properties.get(CONFIGURATIONS_PROPERTY)); configuration.setParentConfiguration(blueprint.getConfiguration()); setConfiguration(configuration); parseHostGroupInfo(properties); - this.securityConfiguration = securityConfiguration; this.credentialsMap = parseCredentials(properties); - if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS && getCredentialsMap().get(KDC_ADMIN_CREDENTIAL) == null) { - throw new InvalidTopologyTemplateException(KDC_ADMIN_CREDENTIAL + " is missing from request."); - } this.configRecommendationStrategy = parseConfigRecommendationStrategy(properties); setProvisionAction(parseProvisionAction(properties)); - mpackInstances = BlueprintFactory.createMpackInstances(properties); - stackIds =
mpackInstances.stream().map(MpackInstance::getStackId).collect(toSet()); // FIXME persist these + processMpackInstances(properties); try { this.quickLinksProfileJson = processQuickLinksProfile(properties); @@ -227,6 +222,21 @@ public ProvisionClusterRequest(String rawRequestBody, Map proper } } + private void processMpackInstances(Map properties) throws InvalidTopologyTemplateException { + if (properties.containsKey(MPACK_INSTANCES_PROPERTY)) { + ObjectMapper mapper = new ObjectMapper(); + mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); + try { + String mpackInstancesJson = mapper.writeValueAsString(properties.get(MPACK_INSTANCES_PROPERTY)); + this.mpackInstances = mapper.readValue(mpackInstancesJson, + new TypeReference<Collection<MpackInstance>>() {}); + } + catch (IOException ex) { + throw new InvalidTopologyTemplateException("Cannot process mpack instances.", ex); + } + } + } + private String processQuickLinksProfile(Map properties) throws QuickLinksProfileEvaluationException { Object globalFilters = properties.get(QUICKLINKS_PROFILE_FILTERS_PROPERTY); Object serviceFilters = properties.get(QUICKLINKS_PROFILE_SERVICES_PROPERTY); @@ -281,7 +291,6 @@ public void setClusterName(String clusterName) { this.clusterName = clusterName; } - @Override public ConfigRecommendationStrategy getConfigRecommendationStrategy() { return configRecommendationStrategy; } @@ -521,18 +530,11 @@ public String getQuickLinksProfileJson() { return quickLinksProfileJson; } - @Override public String getDefaultPassword() { return defaultPassword; } - @Override - public Set getStackIds() { - return stackIds; - } - - @Override - public Collection getMpacks() { + public Collection getMpackInstances() { return mpackInstances; } }
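The processMpackInstances method above turns the untyped mpack_instances property into typed MpackInstance objects by serializing the already-parsed map structure back to JSON and re-reading it, with FAIL_ON_UNKNOWN_PROPERTIES disabled so extra template keys do not fail provisioning. A minimal, self-contained sketch of the same round-trip technique (the Item POJO and the input values are illustrative assumptions, not Ambari types):

```java
import java.util.Collection;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MapToTypedRoundTrip {
  // Illustrative stand-in for a topology class such as MpackInstance.
  public static class Item {
    public String name;
    public String version;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);

    // Request bodies arrive pre-parsed as untyped maps.
    Collection<Map<String, Object>> raw = List.of(
        Map.of("name", "SOME_MPACK", "version", "1.0.0", "unknown_key", "ignored"));

    // Map -> JSON string -> typed collection, the same round trip as the patch.
    String json = mapper.writeValueAsString(raw);
    Collection<Item> items =
        mapper.readValue(json, new TypeReference<Collection<Item>>() {});
    items.forEach(i -> System.out.println(i.name + " " + i.version));
  }
}
```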
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryResourceProvider.java index 0989e145e78..60dff694bbf 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryResourceProvider.java @@ -101,7 +101,7 @@ public class RepositoryResourceProvider extends AbstractControllerResourceProvid put(Resource.Type.Stack, REPOSITORY_STACK_NAME_PROPERTY_ID); put(Resource.Type.StackVersion, REPOSITORY_STACK_VERSION_PROPERTY_ID); put(Resource.Type.ClusterStackVersion, REPOSITORY_CLUSTER_STACK_VERSION_PROPERTY_ID); - put(Resource.Type.OperatingSystemReadOnly, REPOSITORY_OS_TYPE_PROPERTY_ID); + put(Resource.Type.OperatingSystem, REPOSITORY_OS_TYPE_PROPERTY_ID); put(Resource.Type.Repository, REPOSITORY_REPO_ID_PROPERTY_ID); put(Resource.Type.RepositoryVersion, REPOSITORY_REPOSITORY_VERSION_ID_PROPERTY_ID); put(Resource.Type.VersionDefinition, REPOSITORY_VERSION_DEFINITION_ID_PROPERTY_ID); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java index 9b0dfc1fd21..8704cb78697 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java @@ -30,7 +30,7 @@ import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ObjectNotFoundException; -import org.apache.ambari.server.api.resources.OperatingSystemReadOnlyResourceDefinition; +import org.apache.ambari.server.api.resources.OperatingSystemResourceDefinition; import org.apache.ambari.server.api.resources.RepositoryResourceDefinition; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; @@ -88,7 +88,7 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc public static final String REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "display_name"); public static final String REPOSITORY_VERSION_HIDDEN_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "hidden"); public static final String REPOSITORY_VERSION_RESOLVED_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "resolved"); - public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemReadOnlyResourceDefinition().getPluralName(); + public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemResourceDefinition().getPluralName(); public static final String SUBRESOURCE_REPOSITORIES_PROPERTY_ID = new RepositoryResourceDefinition().getPluralName(); public static final String REPOSITORY_VERSION_TYPE_PROPERTY_ID = "RepositoryVersions/type"; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java index 958a3d58fb7..5e5eec8936e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java @@ -51,8 +51,7 @@ public class ScaleClusterRequest extends BaseClusterRequest { * * @throws InvalidTopologyTemplateException if any validation of properties fails */ - public ScaleClusterRequest(String rawRequestBody, Set<Map<String, Object>> propertySet) throws InvalidTopologyTemplateException { - this.rawRequestBody = rawRequestBody; + public ScaleClusterRequest(Set<Map<String, Object>> propertySet) throws InvalidTopologyTemplateException { for (Map properties : propertySet) { // can only operate on a single cluster per logical request if (getClusterName() == null) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java index db31a65c437..3ae0a31a55e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java @@ -668,7 +668,6 @@ protected RequestStageContainer updateServices(RequestStageContainer requestStag LOG.info("Received an updateService request" + ", clusterName=" + request.getClusterName() - + ", serviceGroupName=" + request.getServiceGroupName() + ", serviceName=" + request.getServiceName() + ", request=" + request); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java index d08e5726485..77422eb2fa4 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java @@
-26,10 +26,8 @@ import java.util.Set; import java.util.stream.Stream; -import javax.annotation.Nonnull; - -import org.apache.ambari.server.StackAccessException; -import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.ReadOnlyConfigurationResponse; import org.apache.ambari.server.state.AutoDeployInfo; import org.apache.ambari.server.state.ComponentInfo; @@ -43,7 +41,6 @@ import org.apache.ambari.server.state.ValueAttributesInfo; import org.apache.ambari.server.topology.Cardinality; import org.apache.ambari.server.topology.Configuration; -import org.apache.commons.lang3.tuple.Pair; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; @@ -77,6 +74,12 @@ public class Stack implements StackDefinition { private Map<String, Collection<DependencyInfo>> dependencies = new HashMap<>(); + /** + * Map of dependency to conditional service + */ + private Map<DependencyInfo, String> dependencyConditionalServiceMap = + new HashMap<>(); + /** * Map of database component name to configuration property which indicates whether * the database is to be managed or if it is an external non-managed instance. @@ -124,8 +127,8 @@ public class Stack implements StackDefinition { private Map<String, Set<String>> excludedConfigurationTypes = new HashMap<>(); - public Stack(StackId stackId, AmbariMetaInfo metaInfo) throws StackAccessException { - this(metaInfo.getStack(stackId)); + public Stack(String name, String version, AmbariManagementController ctrl) throws AmbariException { // FIXME remove or at least change to use metainfo directly + this(ctrl.getAmbariMetaInfo().getStack(name, version)); } public Stack(StackInfo stackInfo) { @@ -167,6 +170,10 @@ public StackId getStackId() { return new StackId(getName(), getVersion()); } + Map<DependencyInfo, String> getDependencyConditionalServiceMap() { + return dependencyConditionalServiceMap; + } + @Override public Set getStackIds() { return ImmutableSet.of(getStackId()); } @@ -373,12 +380,13 @@ public String getServiceForComponent(String component) { } @Override - @Nonnull - public Stream<Pair<StackId, String>> getServicesForComponent(String component) { - String service = getServiceForComponent(component); - return service != null - ? Stream.of(Pair.of(getStackId(), service)) - : Stream.empty(); + public Collection<String> getServicesForComponents(Collection<String> components) { + Set<String> services = new HashSet<>(); + for (String component : components) { + services.add(getServiceForComponent(component)); + } + + return services; } @Override @@ -414,6 +422,11 @@ public Collection getDependenciesForComponent(String component) Collections.emptySet(); } + @Override + public String getConditionalServiceForDependency(DependencyInfo dependency) { + return dependencyConditionalServiceMap.get(dependency); + } + @Override public String getExternalComponentConfig(String component) { return dbDependencyInfo.get(component); @@ -597,8 +610,11 @@ private void parseExcludedConfigurations(ServiceInfo stackServiceResponse) { excludedConfigurationTypes.put(stackServiceResponse.getName(), stackServiceResponse.getExcludedConfigTypes()); } + /** + * Register conditional dependencies. + */ //todo: This information should be specified in the stack definition.
- private void registerConditionalDependencies() { + void registerConditionalDependencies() { dbDependencyInfo.put("MYSQL_SERVER", "global/hive_database"); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java index 0641ccb03d2..7e95ac2cca2 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java @@ -166,23 +166,17 @@ public Set populateResources(Set resources, Cluster cluster = clusters.getCluster(clusterName); Service service = null; - Long componentId = null; - String componentType = null; try { - // TODO : Multi_Metrics_Changes. Querying by component name is incorrect. We need to pass-in - // SG name and and Service name also. - componentId = cluster.getComponentId(componentName); - componentType = cluster.getComponentType(componentId); - service = cluster.getServiceByComponentId(componentId); + service = cluster.getServiceByComponentName(componentName); } catch (ServiceNotFoundException e) { - LOG.debug("Could not load componentName {}", componentName); + LOG.debug("Could not load component {}", componentName); continue; } StackId stack = service.getDesiredStackId(); List defs = metaInfo.getMetrics( - stack.getStackName(), stack.getStackVersion(), service.getServiceType(), componentType, type.name()); + stack.getStackName(), stack.getStackVersion(), service.getServiceType(), componentName, type.name()); if (null == defs || 0 == defs.size()) { continue; @@ -351,8 +345,6 @@ private PropertyProvider getDelegate(MetricDefinition definition, // use a Factory for the REST provider if (clz.equals(RestMetricsPropertyProvider.class)) { - // TODO : Multi_Metrics_Changes. We need to pass UniqueComponentName like : - // {SG_instance_name}_{service_instance_name}_component_name instead of just 'componentName'. return metricPropertyProviderFactory.createRESTMetricsPropertyProvider( definition.getProperties(), componentMetrics, streamProvider, metricsHostProvider, clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId, statePropertyId, @@ -367,8 +359,6 @@ private PropertyProvider getDelegate(MetricDefinition definition, Constructor ct = clz.getConstructor(Map.class, Map.class, StreamProvider.class, MetricHostProvider.class, String.class, String.class, String.class, String.class, String.class); - // TODO : Multi_Metrics_Changes. Check if we need to pass UniqueComponentName like : - // {SG_instance_name}_{service_instance_name}_component_name instead of just 'componentId'. 
Object o = ct.newInstance( injector, definition.getProperties(), componentMetrics, diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinition.java index 3f9c74f933a..82aaa693ee2 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinition.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinition.java @@ -23,8 +23,6 @@ import java.util.Set; import java.util.stream.Stream; -import javax.annotation.Nonnull; - import org.apache.ambari.server.state.AutoDeployInfo; import org.apache.ambari.server.state.ComponentInfo; import org.apache.ambari.server.state.DependencyInfo; @@ -32,8 +30,6 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.Cardinality; import org.apache.ambari.server.topology.Configuration; -import org.apache.ambari.server.topology.validators.DependencyAndCardinalityValidator; -import org.apache.commons.lang3.tuple.Pair; /** * Encapsulates stack information. @@ -204,10 +200,13 @@ public interface StackDefinition { String getServiceForComponent(String component); /** - * Get (stackID, service) pairs which contain the specified component in this stack. + * Get the names of the services which contain the specified components. + * + * @param components collection of components + * + * @return collection of services which contain the specified components */ - @Nonnull - Stream<Pair<StackId, String>> getServicesForComponent(String component); + Collection<String> getServicesForComponents(Collection<String> components); /** * Obtain the service name which corresponds to the specified configuration. @@ -233,6 +232,16 @@ public interface StackDefinition { //todo: full dependency graph Collection getDependenciesForComponent(String component); + /** + * Get the service, if any, that a component dependency is conditional on. + * + * @param dependency dependency to get conditional service for + * + * @return conditional service for provided component or null if dependency + * is not conditional on a service + */ + String getConditionalServiceForDependency(DependencyInfo dependency); +
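Per the contract documented above, getConditionalServiceForDependency returns null for unconditional dependencies. One plausible caller, sketched for illustration only (the dependencyApplies helper and the blueprintServices collection are assumptions, not part of this patch):

```java
// True when the dependency should be enforced for a topology: unconditional
// dependencies always apply; conditional ones apply only when the service
// they are conditional on is actually present in the blueprint.
static boolean dependencyApplies(StackDefinition stack, DependencyInfo dependency,
                                 Collection<String> blueprintServices) {
  String conditionalService = stack.getConditionalServiceForDependency(dependency);
  return conditionalService == null || blueprintServices.contains(conditionalService);
}
```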
/** * Get the custom "descriptor" that is used to decide whether component * is a managed or non-managed dependency. The descriptor is formatted as: @@ -241,7 +250,7 @@ * * @param component component to get dependency information for * @return the descriptor of form "config_type/property_name" - * @see DependencyAndCardinalityValidator#isDependencyManaged + * @see org.apache.ambari.server.topology.BlueprintValidatorImpl#isDependencyManaged */ String getExternalComponentConfig(String component); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java index 0e57599fd87..bb02b44febc 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java @@ -49,7 +49,7 @@ public String updateForClusterCreate(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) { - PropertyUnit stackUnit = PropertyUnit.of(topology.getStack(), serviceName, configType, propertyName); + PropertyUnit stackUnit = PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, propertyName); PropertyValue value = PropertyValue.of(propertyName, origValue); if (value.hasUnit(stackUnit)) { return value.toString(); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeGroupResourceProvider.java index 63c870fb8b3..913375a3277 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeGroupResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeGroupResourceProvider.java @@ -57,7 +57,6 @@ public class UpgradeGroupResourceProvider extends AbstractControllerResourceProv protected static final String UPGRADE_CLUSTER_NAME = "UpgradeGroup/cluster_name"; protected static final String UPGRADE_GROUP_NAME = "UpgradeGroup/name"; protected static final String UPGRADE_GROUP_TITLE = "UpgradeGroup/title"; - protected static final String UPGRADE_GROUP_LIFECYCLE = "UpgradeGroup/lifecycle"; protected static final String UPGRADE_GROUP_PROGRESS_PERCENT = "UpgradeGroup/progress_percent"; protected static final String UPGRADE_GROUP_STATUS = "UpgradeGroup/status"; protected static final String UPGRADE_GROUP_DISPLAY_STATUS = "UpgradeGroup/display_status"; @@ -69,7 +68,6 @@ public class UpgradeGroupResourceProvider extends AbstractControllerResourceProv private static final Set PK_PROPERTY_IDS = new HashSet<>( Arrays.asList(UPGRADE_REQUEST_ID, UPGRADE_GROUP_ID)); - private static final Set PROPERTY_IDS = new HashSet<>(); private static final Map KEY_PROPERTY_IDS = new HashMap<>(); @@ -92,7 +90,6 @@ public class UpgradeGroupResourceProvider extends AbstractControllerResourceProv PROPERTY_IDS.add(UPGRADE_GROUP_TOTAL_TASKS); PROPERTY_IDS.add(UPGRADE_GROUP_IN_PROGRESS_TASKS); PROPERTY_IDS.add(UPGRADE_GROUP_COMPLETED_TASKS); - PROPERTY_IDS.add(UPGRADE_GROUP_LIFECYCLE); // keys KEY_PROPERTY_IDS.put(Resource.Type.UpgradeGroup, UPGRADE_GROUP_ID); @@ -186,7 +183,6 @@ private Resource toResource(UpgradeEntity upgrade, UpgradeGroupEntity group, Set setResourceProperty(resource, UPGRADE_GROUP_ID, group.getId(), requestedIds); setResourceProperty(resource, UPGRADE_GROUP_NAME, group.getName(), requestedIds); setResourceProperty(resource, UPGRADE_GROUP_TITLE, group.getTitle(), requestedIds); -
setResourceProperty(resource, UPGRADE_GROUP_LIFECYCLE, group.getLifecycle(), requestedIds); return resource; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java index 09ca1ee0520..f50ef747d00 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java @@ -822,7 +822,6 @@ major stack versions (e.g., HDP 2.2 -> 2.3), and then set config changes if(!itemEntities.isEmpty()) { UpgradeGroupEntity groupEntity = new UpgradeGroupEntity(); - groupEntity.setLifecycle(group.lifecycle); groupEntity.setName(group.name); groupEntity.setTitle(group.title); groupEntity.setItems(itemEntities); @@ -1620,9 +1619,27 @@ public static ConfigUpgradePack build(UpgradeContext cx) { stackId = cx.getStackIdFromVersions(cx.getTargetVersions()); } + List intermediateStacks = upgradePack.getIntermediateStacks(); ConfigUpgradePack configUpgradePack = s_metaProvider.get().getConfigUpgradePack( stackId.getStackName(), stackId.getStackVersion()); + // merge in any intermediate stacks + if (null != intermediateStacks) { + + // start out with the source stack's config pack + ArrayList configPacksToMerge = Lists.newArrayList(configUpgradePack); + + for (UpgradePack.IntermediateStack intermediateStack : intermediateStacks) { + ConfigUpgradePack intermediateConfigUpgradePack = s_metaProvider.get().getConfigUpgradePack( + stackId.getStackName(), intermediateStack.version); + + configPacksToMerge.add(intermediateConfigUpgradePack); + } + + // merge all together + configUpgradePack = ConfigUpgradePack.merge(configPacksToMerge); + } + return configUpgradePack; } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java index 99219beab1f..0934704825b 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProvider.java @@ -30,11 +30,9 @@ import java.util.Map.Entry; import java.util.Set; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.StaticallyInject; -import org.apache.ambari.server.api.resources.OperatingSystemReadOnlyResourceDefinition; +import org.apache.ambari.server.api.resources.OperatingSystemResourceDefinition; import org.apache.ambari.server.api.resources.RepositoryResourceDefinition; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.configuration.ComponentSSLConfiguration; @@ -49,16 +47,16 @@ import org.apache.ambari.server.controller.spi.SystemException; import org.apache.ambari.server.controller.spi.UnsupportedPropertyException; import org.apache.ambari.server.controller.utilities.PropertyHelper; -import org.apache.ambari.server.orm.dao.MpackDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.dao.StackDAO; -import org.apache.ambari.server.orm.entities.MpackEntity; import 
org.apache.ambari.server.orm.entities.RepoDefinitionEntity; import org.apache.ambari.server.orm.entities.RepoOsEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.StackEntity; import org.apache.ambari.server.security.authorization.ResourceType; import org.apache.ambari.server.security.authorization.RoleAuthorization; +import org.apache.ambari.server.stack.RepoUtil; +import org.apache.ambari.server.state.RepositoryInfo; import org.apache.ambari.server.state.RepositoryType; import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.StackInfo; @@ -79,6 +77,7 @@ import com.google.common.base.Function; import com.google.common.collect.Collections2; +import com.google.common.collect.ListMultimap; import com.google.common.collect.Sets; import com.google.inject.Inject; import com.google.inject.Provider; @@ -88,8 +87,6 @@ * files. */ @StaticallyInject -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourceProvider { private static final Logger LOG = LoggerFactory.getLogger(VersionDefinitionResourceProvider.class); @@ -124,7 +121,7 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc public static final String DIRECTIVE_SKIP_URL_CHECK = "skip_url_check"; - public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemReadOnlyResourceDefinition().getPluralName(); + public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemResourceDefinition().getPluralName(); @Inject private static RepositoryVersionDAO s_repoVersionDAO; @@ -141,9 +138,6 @@ public class VersionDefinitionResourceProvider extends AbstractAuthorizedResourc @Inject private static Configuration s_configuration; - @Inject - private static MpackDAO s_mpackDAO; - /** * Key property ids */ @@ -610,12 +604,17 @@ protected void toRepositoryVersionEntity(XmlHolder holder) throws AmbariExceptio StackId stackId = new StackId(holder.xml.release.stackId); StackEntity stackEntity = s_stackDAO.find(stackId.getStackName(), stackId.getStackVersion()); - MpackEntity mpackEntity = s_mpackDAO.findById(stackEntity.getMpackId()); entity.setStack(stackEntity); - List repositoryOperatingSystems = mpackEntity.getRepositoryOperatingSystems(); - entity.addRepoOsEntities(repositoryOperatingSystems); + List repos = holder.xml.repositoryInfo.getRepositories(); + + // Add service repositories (these are not contained by the VDF but are there in the stack model) + ListMultimap stackReposByOs = + s_metaInfo.get().getStack(stackId.getStackName(), stackId.getStackVersion()).getRepositoriesByOs(); + repos.addAll(RepoUtil.getServiceRepos(repos, stackReposByOs)); + + entity.addRepoOsEntities(s_repoVersionHelper.get().createRepoOsEntities(repos)); entity.setVersion(holder.xml.release.getFullVersion()); entity.setDisplayName(stackId, holder.xml.release); @@ -749,17 +748,17 @@ private void addSubresources(Resource res, RepositoryVersionEntity entity) { ObjectNode osBase = factory.objectNode(); ObjectNode osElement = factory.objectNode(); - osElement.put(PropertyHelper.getPropertyName(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS), + osElement.put(PropertyHelper.getPropertyName(OperatingSystemResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS), os.isAmbariManaged()); - 
osElement.put(PropertyHelper.getPropertyName(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID), + osElement.put(PropertyHelper.getPropertyName(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID), os.getFamily()); - osElement.put(PropertyHelper.getPropertyName(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID), + osElement.put(PropertyHelper.getPropertyName(OperatingSystemResourceProvider.OPERATING_SYSTEM_STACK_NAME_PROPERTY_ID), entity.getStackName()); - osElement.put(PropertyHelper.getPropertyName(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID), + osElement.put(PropertyHelper.getPropertyName(OperatingSystemResourceProvider.OPERATING_SYSTEM_STACK_VERSION_PROPERTY_ID), entity.getStackVersion()); - osBase.put(PropertyHelper.getPropertyCategory(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS), + osBase.put(PropertyHelper.getPropertyCategory(OperatingSystemResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS), osElement); ArrayNode reposArray = factory.arrayNode(); @@ -803,7 +802,7 @@ private void addSubresources(Resource res, RepositoryVersionEntity entity) { subs.add(osBase); } - res.setProperty(new OperatingSystemReadOnlyResourceDefinition().getPluralName(), subs); + res.setProperty(new OperatingSystemResourceDefinition().getPluralName(), subs); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsCollectorHAClusterState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsCollectorHAClusterState.java index 80811d2b27f..231d1034ceb 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsCollectorHAClusterState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricsCollectorHAClusterState.java @@ -23,7 +23,6 @@ import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.Role; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.AmbariServer; @@ -61,23 +60,11 @@ public MetricsCollectorHAClusterState(String clusterName) { this.liveCollectorHosts = new CopyOnWriteArraySet<>(); this.deadCollectorHosts = new CopyOnWriteArraySet<>(); collectorDownRefreshCounter = new AtomicInteger(0); - - } - - private Long getComponentId(String componentName) { - Long componentId = null; - try { - componentId = managementController.getClusters().getCluster(clusterName).getComponentId(componentName); - } catch (AmbariException e) { - e.printStackTrace(); - } - return componentId; } public void addMetricsCollectorHost(String collectorHost) { - Long componentId = getComponentId(Role.METRICS_COLLECTOR.name()); if (HostStatusHelper.isHostComponentLive(managementController, clusterName, collectorHost, "AMBARI_METRICS", - componentId, Role.METRICS_COLLECTOR.name(), Role.METRICS_COLLECTOR.name())) { + Role.METRICS_COLLECTOR.name())) { liveCollectorHosts.add(collectorHost); deadCollectorHosts.remove(collectorHost); } else { @@ -86,9 +73,9 @@ public void addMetricsCollectorHost(String collectorHost) { } //If there is no current collector host or the current host is down, this will be a proactive switch. - // TODO : Multi_Metrics_Changes. componentName=Role.METRICS_COLLECTOR.name() may or may not be unique if there are multiple instances. 
if (currentCollectorHost == null || !HostStatusHelper.isHostComponentLive(managementController, clusterName, - currentCollectorHost, "AMBARI_METRICS", componentId, Role.METRICS_COLLECTOR.name(), Role.METRICS_COLLECTOR.name())) { + currentCollectorHost, "AMBARI_METRICS", + Role.METRICS_COLLECTOR.name())) { refreshCollectorHost(currentCollectorHost); } } @@ -155,12 +142,11 @@ private void testAndAddDeadCollectorsToLiveList() { } private boolean isValidAliveCollectorHost(String clusterName, String collectorHost) { - Long componentId = getComponentId(Role.METRICS_COLLECTOR.name()); - // TODO : Multi_Metrics_Changes. componentName=Role.METRICS_COLLECTOR.name() may or may not be unique if there are multiple instances. + return ((collectorHost != null) && HostStatusHelper.isHostLive(managementController, clusterName, collectorHost) && - HostStatusHelper.isHostComponentLive(managementController, clusterName, collectorHost, - "AMBARI_METRICS", componentId, Role.METRICS_COLLECTOR.name(), Role.METRICS_COLLECTOR.name())); + HostStatusHelper.isHostComponentLive(managementController, clusterName, collectorHost, "AMBARI_METRICS", + Role.METRICS_COLLECTOR.name())); } /* @@ -196,21 +182,19 @@ public boolean isCollectorHostLive() { } public boolean isCollectorComponentAlive() { - Long componentId = getComponentId(Role.METRICS_COLLECTOR.name()); + //Check in live hosts - // TODO : Multi_Metrics_Changes. componentName=Role.METRICS_COLLECTOR.name() may or may not be unique if there are multiple instances. for (String host : liveCollectorHosts) { if (HostStatusHelper.isHostComponentLive(managementController, clusterName, host, "AMBARI_METRICS", - componentId, Role.METRICS_COLLECTOR.name(), Role.METRICS_COLLECTOR.name())) { + Role.METRICS_COLLECTOR.name())) { return true; } } //Check in dead hosts. Don't update live and dead lists. Can be done on refresh call. - // TODO : Multi_Metrics_Changes. componentName=Role.METRICS_COLLECTOR.name() may or may not be unique if there are multiple instances. 
for (String host : deadCollectorHosts) { if (HostStatusHelper.isHostComponentLive(managementController, clusterName, host, "AMBARI_METRICS", - componentId, Role.METRICS_COLLECTOR.name(), Role.METRICS_COLLECTOR.name())) { + Role.METRICS_COLLECTOR.name())) { return true; } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java index c94f15f00fc..ac0c590fdae 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java @@ -60,6 +60,7 @@ public abstract class GangliaPropertyProvider extends MetricsPropertyProvider { */ static final Map<String, List<String>> GANGLIA_CLUSTER_NAME_MAP = new HashMap<>(); + static { GANGLIA_CLUSTER_NAME_MAP.put("NAMENODE", Collections.singletonList("HDPNameNode")); GANGLIA_CLUSTER_NAME_MAP.put("DATANODE", Arrays.asList("HDPDataNode", "HDPSlaves")); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java index e530a83fa3b..1524d9aff27 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java @@ -109,7 +109,6 @@ enum InternalType { Extension, ExtensionVersion, OperatingSystem, - OperatingSystemReadOnly, Repository, StackService, StackConfiguration, @@ -246,7 +245,6 @@ final class Type implements Comparable<Type> { public static final Type Extension = InternalType.Extension.getType(); public static final Type ExtensionVersion = InternalType.ExtensionVersion.getType(); public static final Type OperatingSystem = InternalType.OperatingSystem.getType(); - public static final Type OperatingSystemReadOnly = InternalType.OperatingSystemReadOnly.getType(); public static final Type Repository = InternalType.Repository.getType(); public static final Type StackService = InternalType.StackService.getType(); public static final Type StackConfiguration = InternalType.StackConfiguration.getType(); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedState.java index 0a352b0acf8..904523058b4 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedState.java @@ -89,7 +89,7 @@ public State getState(String clusterName, String serviceName) { StackId stackId = service.getDesiredStackId(); ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedState.java index 63a01dc5358..b34b926fb11 100644 ---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedState.java @@ -52,7 +52,7 @@ public State getState(String clusterName, String serviceName) { if (cluster != null && managementControllerProvider != null) { Service service = cluster.getService(serviceName); ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedState.java index 51dd6c3807e..63456e8c98b 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedState.java @@ -54,7 +54,7 @@ public State getState(String clusterName, String serviceName) { StackId stackId = service.getDesiredStackId(); ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java index 6c171136f5a..05e89040946 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedState.java @@ -54,7 +54,7 @@ public State getState(String clusterName, String serviceName) { StackId stackId = service.getDesiredStackId(); ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null,null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedState.java index 6881a1f31da..83d26f0a71d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedState.java @@ -55,7 +55,7 @@ public State getState(String clusterName, String serviceName) { ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff 
--git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedState.java index db2318deeaa..5cd0319e1f8 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedState.java @@ -54,7 +54,7 @@ public State getState(String clusterName, String serviceName) { StackId stackId = service.getDesiredStackId(); ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/YARNServiceCalculatedState.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/YARNServiceCalculatedState.java index adeb74032a3..2530068d69f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/YARNServiceCalculatedState.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/state/YARNServiceCalculatedState.java @@ -55,7 +55,7 @@ public State getState(String clusterName, String serviceName) { ServiceComponentHostRequest request = new ServiceComponentHostRequest(clusterName, service.getServiceGroupName(), - serviceName, null, null, null, null, null); + serviceName, null, null, null); Set hostComponentResponses = managementControllerProvider.get().getHostComponents(Collections.singleton(request)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/HostComponentVersionAdvertisedEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/HostComponentVersionAdvertisedEvent.java index e8509b46779..6155880b1cb 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/events/HostComponentVersionAdvertisedEvent.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/events/HostComponentVersionAdvertisedEvent.java @@ -17,8 +17,6 @@ */ package org.apache.ambari.server.events; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.ServiceComponentHost; @@ -26,8 +24,6 @@ * The {@link HostComponentVersionAdvertisedEvent} * occurs when a Host Component advertises its current version value.
*/ -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class HostComponentVersionAdvertisedEvent extends ClusterEvent { protected Cluster cluster; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java index 34a19fe0026..df038b4ecd2 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java @@ -208,6 +208,8 @@ private void processComponentAdvertisedVersion(Cluster cluster, ServiceComponent setUpgradeStateAndRecalculateHostVersions(sch, UpgradeState.VERSION_MISMATCH); } } + + sc.updateRepositoryState(newVersion); } /** diff --git a/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackManager.java b/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackManager.java index 2e8ed3a52b6..d77c5e1fa70 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackManager.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackManager.java @@ -44,15 +44,11 @@ import org.apache.ambari.server.orm.dao.MpackDAO; import org.apache.ambari.server.orm.dao.StackDAO; import org.apache.ambari.server.orm.entities.MpackEntity; -import org.apache.ambari.server.orm.entities.RepoOsEntity; import org.apache.ambari.server.orm.entities.StackEntity; -import org.apache.ambari.server.stack.RepoUtil; import org.apache.ambari.server.state.Module; import org.apache.ambari.server.state.Mpack; import org.apache.ambari.server.state.OsSpecific; -import org.apache.ambari.server.state.stack.RepositoryXml; import org.apache.ambari.server.state.stack.StackMetainfoXml; -import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; @@ -83,20 +79,20 @@ public class MpackManager { private MpackDAO mpackDAO; private StackDAO stackDAO; private File stackRoot; - private RepositoryVersionHelper repoVersionHelper; - @AssistedInject - public MpackManager(@Assisted("mpacksv2Staging") File mpacksStagingLocation, - @Assisted("stackRoot") File stackRootDir, MpackDAO mpackDAOObj, StackDAO stackDAOObj, - RepositoryVersionHelper repoVersionHelper) { + public MpackManager( + @Assisted("mpacksv2Staging") File mpacksStagingLocation, + @Assisted("stackRoot") File stackRootDir, + MpackDAO mpackDAOObj, + StackDAO stackDAOObj) { mpacksStaging = mpacksStagingLocation; mpackDAO = mpackDAOObj; stackRoot = stackRootDir; stackDAO = stackDAOObj; - this.repoVersionHelper = repoVersionHelper; parseMpackDirectories(); + } /** @@ -203,7 +199,6 @@ public MpackResponse registerMpack(MpackRequest mpackRequest) mpackDirectory = mpacksStaging + File.separator + mpack.getName() + File.separator + mpack.getVersion(); } } - extractMpackTar(mpack, mpackTarPath, mpackDirectory); mpack.setMpackUri(mpackRequest.getMpackUri()); mpackResourceId = populateDB(mpack); @@ -315,14 +310,6 @@ private void extractMpackTar(Mpack mpack, Path mpackTarPath, String mpackDirecto generateMetainfo(metainfoFile, mpack); } - RepositoryXml repositoryXml = RepoUtil.getRepositoryXml(extractedMpackDirectory.toFile()); - if (null == repositoryXml) { - 
throw new IOException("The repository file " + RepoUtil.REPOSITORY_FILE_NAME - + " must exist in the management pack"); - } - - mpack.setRepositoryXml(repositoryXml); - createSymLinks(mpack); } @@ -522,23 +509,14 @@ protected Long populateDB(Mpack mpack) throws IOException { if (resultSet.size() == 0 && stackEntity == null) { LOG.info("Adding mpack {}-{} to the database", mpackName, mpackVersion); - final MpackEntity mpackEntity = new MpackEntity(); + MpackEntity mpackEntity = new MpackEntity(); mpackEntity.setMpackName(mpackName); mpackEntity.setMpackVersion(mpackVersion); mpackEntity.setMpackUri(mpack.getMpackUri()); mpackEntity.setRegistryId(mpack.getRegistryId()); - mpackDAO.create(mpackEntity); - - List repositoryOperatingSystems = repoVersionHelper.createRepoOsEntities( - mpack.getRepositoryXml().getRepositories()); - - repositoryOperatingSystems.stream().forEach( - operatingSystem -> operatingSystem.setMpackEntity(mpackEntity)); - - mpackEntity.setRepositoryOperatingSystems(repositoryOperatingSystems); - return mpackDAO.merge(mpackEntity).getId(); + Long mpackId = mpackDAO.create(mpackEntity); + return mpackId; } - //mpack already exists return null; } @@ -608,9 +586,8 @@ public boolean removeMpack(MpackEntity mpackEntity, StackEntity stackEntity) thr if (stackEntity != null) { Path stackPath = Paths.get(stackRoot + "/" + stackEntity.getStackName() + "/" + stackEntity.getStackVersion()); File stackDirectory = new File(stackRoot + "/" + stackEntity.getStackName()); - if (!Files.exists(stackPath)) { + if (!Files.exists(stackPath)) Files.delete(stackPath); - } if (stackDirectory.isDirectory()) { if (stackDirectory.list().length == 0) { Files.delete(stackDirectory.toPath()); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/CrudDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/CrudDAO.java index d40c53f28b7..c4ed2051949 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/CrudDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/CrudDAO.java @@ -95,7 +95,7 @@ public Long findMaxId(String idColName) { * @param entity entity to create */ @Transactional - public void create(E entity) { + protected void create(E entity) { entityManagerProvider.get().persist(entity); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java index 3cb994e82f9..6f6fa43514e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java @@ -86,15 +86,24 @@ public HostComponentDesiredStateEntity findByServiceComponentAndHost( /** * Retrieve the single Host Component Desired State for the given unique cluster, service, component, and host. * - * @param componentId Component Id + * @param clusterId Cluster ID + * @param serviceGroupId Service Group ID + * @param serviceId Service ID + * @param componentName Component Name + * @param hostId Host ID * @return Return the Host Component Desired State entity that matches the criteria.
*/ @RequiresSession - public HostComponentDesiredStateEntity findByIndex(Long componentId) { + public HostComponentDesiredStateEntity findByIndex(Long clusterId, Long serviceGroupId, Long serviceId, + String componentName, Long hostId) { final TypedQuery<HostComponentDesiredStateEntity> query = entityManagerProvider.get() .createNamedQuery("HostComponentDesiredStateEntity.findByIndex", HostComponentDesiredStateEntity.class); - query.setParameter("id", componentId); + query.setParameter("clusterId", clusterId); + query.setParameter("serviceGroupId", serviceGroupId); + query.setParameter("serviceId", serviceId); + query.setParameter("componentName", componentName); + query.setParameter("hostId", hostId); return daoUtils.selectSingle(query); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java index dd2e9d085fc..7115b280a51 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java @@ -42,9 +42,6 @@ public class HostComponentStateDAO { @Inject HostDAO hostDAO; - @Inject - HostComponentDesiredStateDAO hostComponentDesiredStateDAO; - @RequiresSession public HostComponentStateEntity findById(long id) { return entityManagerProvider.get().find(HostComponentStateEntity.class, id); @@ -123,18 +120,22 @@ public List<HostComponentStateEntity> findByServiceAndComponent( * Service Group ID * @param serviceId * Service ID - * @param componentId - * Component ID + * @param componentName + * Component Name * @param hostId * Host ID * @return Return all of the Host Component States that match the criteria. */ @RequiresSession public HostComponentStateEntity findByIndex(Long clusterId, Long serviceGroupId, Long serviceId, - Long componentId, Long hostId) { + String componentName, Long hostId) { final TypedQuery<HostComponentStateEntity> query = entityManagerProvider.get().createNamedQuery( "HostComponentStateEntity.findByIndex", HostComponentStateEntity.class); - query.setParameter("id", componentId); + query.setParameter("clusterId", clusterId); + query.setParameter("serviceGroupId", serviceGroupId); + query.setParameter("serviceId", serviceId); + query.setParameter("componentName", componentName); + query.setParameter("hostId", hostId); return daoUtils.selectSingle(query); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java index a8ef7a8c87c..feb4172b160 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java @@ -24,8 +24,6 @@ import javax.persistence.EntityManager; import javax.persistence.TypedQuery; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.orm.RequiresSession; import org.apache.ambari.server.orm.entities.HostEntity; import org.apache.ambari.server.orm.entities.HostVersionEntity; @@ -45,8 +43,6 @@ * {@link org.apache.ambari.server.state.RepositoryVersionState#INSTALLING}.
*/ @Singleton -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> { @Inject Provider<EntityManager> entityManagerProvider; @@ -161,9 +157,9 @@ public List<HostVersionEntity> findByClusterAndState(String clusterName, Reposit query.setParameter("clusterName", clusterName); query.setParameter("state", state); - return daoUtils.selectList(query); + return daoUtils.selectList(query); } - + /** * Retrieve all of the host versions for the given cluster name, host name, and state.
* @param clusterName Cluster name diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackDAO.java index 8e12e93fae0..43e2a7f89dd 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackDAO.java @@ -24,6 +24,7 @@ import org.apache.ambari.server.orm.RequiresSession; import org.apache.ambari.server.orm.entities.MpackEntity; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,15 +35,7 @@ @Singleton -public class MpackDAO extends CrudDAO { - - /** - * Constructor. - */ - public MpackDAO() { - super(MpackEntity.class); - } - +public class MpackDAO { protected final static Logger LOG = LoggerFactory.getLogger(MpackDAO.class); /** @@ -57,6 +50,15 @@ public MpackDAO() { @Inject private DaoUtils m_daoUtils; + /** + * Persists a new mpack + */ + @Transactional + public Long create(MpackEntity mpackEntity) { + m_entityManagerProvider.get().persist(mpackEntity); + return mpackEntity.getId(); + } + /** * Gets an mpack with the specified ID. * @@ -84,6 +86,18 @@ public List findByNameVersion(String mpackName, String mpackVersion return m_daoUtils.selectList(query); } + /** + * Gets all mpacks stored in the database across all clusters. + * + * @return all mpacks or an empty list if none exist (never {@code null}). + */ + @RequiresSession + public List findAll() { + TypedQuery query = m_entityManagerProvider.get().createNamedQuery( + "MpackEntity.findAll", MpackEntity.class); + return m_daoUtils.selectList(query); + } + @Transactional public void removeById(Long id) { m_entityManagerProvider.get().remove(findById(id)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackHostStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackHostStateDAO.java deleted file mode 100644 index 5be01370003..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/MpackHostStateDAO.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ambari.server.orm.dao; - -import java.util.Collection; -import java.util.List; - -import javax.persistence.EntityManager; -import javax.persistence.TypedQuery; - -import org.apache.ambari.server.orm.RequiresSession; -import org.apache.ambari.server.orm.entities.MpackHostStateEntity; - -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.google.inject.Singleton; -import com.google.inject.persist.Transactional; - -/** - * The {@link MpackHostStateDAO} contains all of the CRUD operations relating to - * the installation state of a management pack on a host. 
- */ -@Singleton -public class MpackHostStateDAO extends CrudDAO { - @Inject - Provider entityManagerProvider; - - @Inject - DaoUtils daoUtils; - - /** - * Constructor. - */ - public MpackHostStateDAO() { - super(MpackHostStateEntity.class); - } - - /** - * @param entity entity to create - */ - @Override - @Transactional - public void create(MpackHostStateEntity entity) throws IllegalArgumentException { - super.create(entity); - } - - /** - * Retrieve all of the install states for any management packs installed on - * the specified host. - * - * @param hostName - * FQDN of host - * @return Return all of the mpack install states that match the criteria. - */ - @RequiresSession - public List findByHost(String hostName) { - final TypedQuery query = entityManagerProvider.get().createNamedQuery( - "mpackHostStateForHost", MpackHostStateEntity.class); - query.setParameter("hostName", hostName); - - return daoUtils.selectList(query); - } - - /** - * Retrieve all of the host versions for the given management pack. - * - * @param mpackId - * the ID of the mpack - * @return all of the hosts in the cluster which have entries for the - * specified mpack. - */ - @RequiresSession - public List findByMpack(Long mpackId) { - final TypedQuery query = entityManagerProvider.get().createNamedQuery( - "mpackHostStateForMpack", MpackHostStateEntity.class); - query.setParameter("mpackId", mpackId); - - return daoUtils.selectList(query); - } - - /** - * Removes all of the associated mpack host states for a given host. - * - * @param hostName - * the name of the host. - */ - @Transactional - public void removeByHostName(String hostName) { - Collection mpackHostStates = findByHost(hostName); - this.remove(mpackHostStates); - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAO.java index dd51e557005..732a4e9f557 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RepositoryVersionDAO.java @@ -22,8 +22,6 @@ import javax.persistence.TypedQuery; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.orm.RequiresSession; import org.apache.ambari.server.orm.entities.RepoOsEntity; @@ -40,8 +38,6 @@ * */ @Singleton -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class RepositoryVersionDAO extends CrudDAO { /** * Constructor. @@ -157,7 +153,7 @@ public List findByStackAndType(StackId stackId, Reposit query.setParameter("type", type); return daoUtils.selectList(query); } - + /** * Validates and creates an object. * The version must be unique within this stack name (e.g., HDP, HDPWIN, BIGTOP). 
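A minimal usage sketch of the reworked MpackDAO earlier in this patch (an illustration, not part of the diff): it assumes a Guice-injected mpackDAO as elsewhere in ambari-server, and the mpack name and version literals are hypothetical.

    MpackEntity entity = new MpackEntity();
    entity.setMpackName("sample-mpack");         // hypothetical name
    entity.setMpackVersion("1.0.0");             // hypothetical version
    Long mpackId = mpackDAO.create(entity);      // create() now persists and returns the generated ID
    List<MpackEntity> all = mpackDAO.findAll();  // backed by the "MpackEntity.findAll" named query; never null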
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java index 313eb51bab8..61bc9aed6d6 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java @@ -26,6 +26,7 @@ import org.apache.ambari.server.orm.RequiresSession; import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; +import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity; import com.google.inject.Inject; import com.google.inject.Provider; @@ -82,7 +83,7 @@ public List findAll() { */ @RequiresSession public ServiceComponentDesiredStateEntity findByName(long clusterId, long serviceGroupId, long serviceId, - String componentName, String componentType) { + String componentName) { EntityManager entityManager = entityManagerProvider.get(); TypedQuery query = entityManager.createNamedQuery( "ServiceComponentDesiredStateEntity.findByName", ServiceComponentDesiredStateEntity.class); @@ -91,38 +92,6 @@ public ServiceComponentDesiredStateEntity findByName(long clusterId, long servic query.setParameter("serviceGroupId", serviceGroupId); query.setParameter("serviceId", serviceId); query.setParameter("componentName", componentName); - query.setParameter("componentType", componentType); - - ServiceComponentDesiredStateEntity entity = null; - List entities = daoUtils.selectList(query); - if (null != entities && !entities.isEmpty()) { - entity = entities.get(0); - } - - return entity; - } - - /** - * Finds a {@link ServiceComponentDesiredStateEntity} by a combination of - * cluster, service, and component. 
- * - * @param clusterId - * the cluster ID - * @param serviceGroupId - * the service group ID - * @param serviceId - * the service ID - * @param componentId - * the component id (not {@code null}) - */ - @RequiresSession - public ServiceComponentDesiredStateEntity findById(long clusterId, long serviceGroupId, long serviceId, - Long componentId) { - EntityManager entityManager = entityManagerProvider.get(); - TypedQuery query = entityManager.createNamedQuery( - "ServiceComponentDesiredStateEntity.findById", ServiceComponentDesiredStateEntity.class); - - query.setParameter("id", componentId); ServiceComponentDesiredStateEntity entity = null; List entities = daoUtils.selectList(query); @@ -154,10 +123,59 @@ public void remove(ServiceComponentDesiredStateEntity serviceComponentDesiredSta } @Transactional - public void removeByName(long clusterId, long serviceGroupId, long serviceId, String componentName, String componentType) { - ServiceComponentDesiredStateEntity entity = findByName(clusterId, serviceGroupId, serviceId, componentName, componentType); + public void removeByName(long clusterId, long serviceGroupId, long serviceId, String componentName) { + ServiceComponentDesiredStateEntity entity = findByName(clusterId, serviceGroupId, serviceId, componentName); if (null != entity) { entityManagerProvider.get().remove(entity); } } + + /** + * @param clusterId the cluster id + * @param serviceGroupId the service group id + * @param serviceId the service id + * @param componentName the component name + * @return the list of repository versions for a component + */ + @RequiresSession + public List findVersions(long clusterId, long serviceGroupId, long serviceId, + String componentName) { + EntityManager entityManager = entityManagerProvider.get(); + TypedQuery query = entityManager.createNamedQuery( + "ServiceComponentVersionEntity.findByComponent", ServiceComponentVersionEntity.class); + + query.setParameter("clusterId", clusterId); + query.setParameter("serviceGroupId", serviceGroupId); + query.setParameter("serviceId", serviceId); + query.setParameter("componentName", componentName); + + return daoUtils.selectList(query); + } + + /** + * Gets a specific version for a component + * @param clusterId the cluster id + * @param serviceGroupId the service group id + * @param serviceId the service id + * @param componentName the component name + * @param version the component version to find + * @return the version entity, or {@code null} if not found + */ + @RequiresSession + public ServiceComponentVersionEntity findVersion(long clusterId, long serviceGroupId, long serviceId, + String componentName, String version) { + + EntityManager entityManager = entityManagerProvider.get(); + TypedQuery query = entityManager.createNamedQuery( + "ServiceComponentVersionEntity.findByComponentAndVersion", ServiceComponentVersionEntity.class); + + query.setParameter("clusterId", clusterId); + query.setParameter("serviceGroupId", serviceGroupId); + query.setParameter("serviceId", serviceId); + query.setParameter("componentName", componentName); + query.setParameter("repoVersion", version); + + return daoUtils.selectSingle(query); + } + } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java index 86534f3c01d..fafec3d77ec 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java +++ 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java @@ -66,7 +66,7 @@ "SELECT hcds from HostComponentDesiredStateEntity hcds WHERE hcds.clusterId=:clusterId AND hcds.serviceGroupId=:serviceGroupId AND hcds.serviceId=:serviceId AND hcds.componentName=:componentName AND hcds.hostEntity.hostName=:hostName"), @NamedQuery(name = "HostComponentDesiredStateEntity.findByIndex", query = - "SELECT hcds from HostComponentDesiredStateEntity hcds WHERE hcds.id=:id") + "SELECT hcds from HostComponentDesiredStateEntity hcds WHERE hcds.clusterId=:clusterId AND hcds.serviceGroupId=:serviceGroupId AND hcds.serviceId=:serviceId AND hcds.componentName=:componentName AND hcds.hostId=:hostId"), }) public class HostComponentDesiredStateEntity { @@ -91,9 +91,6 @@ public class HostComponentDesiredStateEntity { @Column(name = "component_name", insertable = false, updatable = false) private String componentName = ""; - @Column(name = "component_type", insertable = false, updatable = false) - private String componentType = ""; - @Basic @Column(name = "desired_state", nullable = false, insertable = true, updatable = true) @Enumerated(value = EnumType.STRING) @@ -108,8 +105,7 @@ public class HostComponentDesiredStateEntity { @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false), @JoinColumn(name = "service_group_id", referencedColumnName = "service_group_id", nullable = false), @JoinColumn(name = "service_id", referencedColumnName = "service_id", nullable = false), - @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false), - @JoinColumn(name = "component_type", referencedColumnName = "component_type", nullable = false) }) + @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false)}) private ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity; @ManyToOne @@ -158,14 +154,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - public String getComponentType() { - return defaultString(componentType); - } - - public void setComponentType(String componentType) { - this.componentType = componentType; - } - public State getDesiredState() { return desiredState; } @@ -221,10 +209,6 @@ public boolean equals(Object o) { return false; } - if (!Objects.equal(componentType, that.componentType)) { - return false; - } - if (!Objects.equal(desiredState, that.desiredState)) { return false; } @@ -244,7 +228,6 @@ public int hashCode() { result = 31 * result + (serviceId != null ? serviceId.hashCode() : 0); result = 31 * result + (hostEntity != null ? hostEntity.hashCode() : 0); result = 31 * result + (componentName != null ? componentName.hashCode() : 0); - result = 31 * result + (componentType != null ? componentType.hashCode() : 0); result = 31 * result + (desiredState != null ? 
desiredState.hashCode() : 0); return result; } @@ -281,7 +264,6 @@ public void setRestartRequired(boolean restartRequired) { public String toString() { return Objects.toStringHelper(this).add("clusterId", clusterId).add( "serviceGroupId", serviceGroupId).add("serviceId", serviceId).add("componentName", - componentName).add("componentType", componentType).add("hostId", hostId).add("desiredState", - desiredState).toString(); + componentName).add("hostId", hostId).add("desiredState", desiredState).toString(); } } \ No newline at end of file diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java index 34379a8ea41..f56df9d2427 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java @@ -30,23 +30,16 @@ import javax.persistence.ManyToOne; import javax.persistence.NamedQueries; import javax.persistence.NamedQuery; -import javax.persistence.OneToOne; import javax.persistence.Table; import javax.persistence.TableGenerator; -import javax.persistence.UniqueConstraint; - import org.apache.ambari.server.state.State; import org.apache.ambari.server.state.UpgradeState; import com.google.common.base.Objects; @Entity -@Table( - name = "hostcomponentstate", - uniqueConstraints = @UniqueConstraint( - name = "UQ_hostcomponentstate_name", - columnNames = { "component_name", "service_id" , "host_id", "service_group_id", "cluster_id" }) ) +@Table(name = "hostcomponentstate") @TableGenerator( name = "hostcomponentstate_id_generator", table = "ambari_sequences", @@ -83,7 +76,9 @@ "AND hcs.version != :version"), @NamedQuery( name = "HostComponentStateEntity.findByIndex", - query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.id=:id") }) + query = "SELECT hcs from HostComponentStateEntity hcs WHERE hcs.clusterId=:clusterId " + + "AND hcs.serviceGroupId=:serviceGroupId AND hcs.serviceId=:serviceId AND hcs.componentName=:componentName AND hcs.hostId=:hostId") }) + public class HostComponentStateEntity { @Id @@ -91,9 +86,6 @@ public class HostComponentStateEntity { @Column(name = "id", nullable = false, insertable = true, updatable = false) private Long id; - @Column(name = "host_component_desired_state_id", nullable = false, insertable = false, updatable = false) - private Long hostComponentDesiredStateId; - @Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10) private Long clusterId; @@ -109,9 +101,6 @@ public class HostComponentStateEntity { @Column(name = "component_name", nullable = false, insertable = false, updatable = false) private String componentName; - @Column(name = "component_type", nullable = false, insertable = false, updatable = false) - private String componentType; - /** * Version reported by host component during last status update. 
*/ @@ -131,18 +120,13 @@ public class HostComponentStateEntity { @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false), @JoinColumn(name = "service_group_id", referencedColumnName = "service_group_id", nullable = false), @JoinColumn(name = "service_id", referencedColumnName = "service_id", nullable = false), - @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false), - @JoinColumn(name = "component_type", referencedColumnName = "component_type", nullable = false) }) + @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false) }) private ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity; @ManyToOne @JoinColumn(name = "host_id", referencedColumnName = "host_id", nullable = false) private HostEntity hostEntity; - @OneToOne - @JoinColumn(name = "host_component_desired_state_id", referencedColumnName = "id", nullable = false) - private HostComponentDesiredStateEntity hostComponentDesiredStateEntity; - public Long getId() { return id; } @@ -187,26 +171,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - public Long getHostComponentDesiredStateId() { - return hostComponentDesiredStateEntity != null ? hostComponentDesiredStateEntity.getId() : null; - } - - public void setComponentId(Long componentId) { - this.id = componentId; - } - - public Long getComponentId() { - return id; - } - - public void setComponentType(String componentType) { - this.componentType = componentType; - } - - public String getComponentType() { - return componentType; - } - public State getCurrentState() { return currentState; } @@ -264,11 +228,6 @@ public boolean equals(Object o) { return false; } - if (componentType != null ? !componentType.equals(that.componentType) - : that.componentType != null) { - return false; - } - if (currentState != null ? !currentState.equals(that.currentState) : that.currentState != null) { return false; @@ -283,10 +242,6 @@ public boolean equals(Object o) { return false; } - if (hostComponentDesiredStateEntity != null ? !hostComponentDesiredStateEntity.equals(that.hostComponentDesiredStateEntity) : that.hostComponentDesiredStateEntity != null) { - return false; - } - if (version != null ? !version.equals(that.version) : that.version != null) { return false; } @@ -301,9 +256,7 @@ public int hashCode() { result = 31 * result + (serviceGroupId != null ? serviceGroupId.intValue() : 0); result = 31 * result + (serviceId != null ? serviceId.intValue() : 0); result = 31 * result + (hostEntity != null ? hostEntity.hashCode() : 0); - result = 31 * result + (hostComponentDesiredStateEntity != null ? hostComponentDesiredStateEntity.hashCode() : 0); result = 31 * result + (componentName != null ? componentName.hashCode() : 0); - result = 31 * result + (componentType != null ? componentType.hashCode() : 0); result = 31 * result + (currentState != null ? currentState.hashCode() : 0); result = 31 * result + (upgradeState != null ? upgradeState.hashCode() : 0); result = 31 * result + (version != null ? 
version.hashCode() : 0); @@ -314,7 +267,8 @@ public ServiceComponentDesiredStateEntity getServiceComponentDesiredStateEntity( return serviceComponentDesiredStateEntity; } - public void setServiceComponentDesiredStateEntity(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) { + public void setServiceComponentDesiredStateEntity( + ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) { this.serviceComponentDesiredStateEntity = serviceComponentDesiredStateEntity; } @@ -326,22 +280,14 @@ public void setHostEntity(HostEntity hostEntity) { this.hostEntity = hostEntity; } - public HostComponentDesiredStateEntity getHostComponentDesiredStateEntity() { - return hostComponentDesiredStateEntity; - } - - public void setHostComponentDesiredStateEntity(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) { - this.hostComponentDesiredStateEntity = hostComponentDesiredStateEntity; - } - /** * {@inheritDoc} */ @Override public String toString() { return Objects.toStringHelper(this).add("clusterId", clusterId).add("serviceGroupId", serviceGroupId).add( - "serviceId", serviceId).add("componentId", id).add("componentName", componentName).add - ("componentType", componentType).add("hostId", hostId).add("state", currentState).toString(); + "serviceId", serviceId).add("componentName", componentName).add( + "hostId", hostId).add("state", currentState).toString(); } } \ No newline at end of file diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java index 94bbb78a4a2..2c61a3c2aa3 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java @@ -33,12 +33,8 @@ import javax.persistence.TableGenerator; import javax.persistence.UniqueConstraint; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.state.RepositoryVersionState; -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) @Entity @Table( name = "host_version", diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackEntity.java index 20fc872f33a..879d24a1238 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackEntity.java @@ -17,20 +17,15 @@ */ package org.apache.ambari.server.orm.entities; -import java.util.ArrayList; -import java.util.List; import java.util.Objects; -import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; -import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.NamedQueries; import javax.persistence.NamedQuery; -import javax.persistence.OneToMany; import javax.persistence.Table; import javax.persistence.TableGenerator; @@ -69,18 +64,6 @@ public class MpackEntity { @Column(name = "mpack_uri", nullable = false) private String mpackUri; - /** - * The list of repositories for this management pack. Each repository is - * organized by operating system. 
A single operating system can have multiple - * repo URLs defined for it for a given management pack. - */ - @OneToMany( - orphanRemoval = true, - fetch = FetchType.EAGER, - cascade = { CascadeType.MERGE, CascadeType.REFRESH, CascadeType.REMOVE }, - mappedBy = "mpackEntity") - private List repositoryOperatingSystems = new ArrayList<>(); - public Long getId() { return id; } @@ -121,30 +104,6 @@ public void setMpackUri(String mpackUri) { this.mpackUri = mpackUri; } - /** - * Gets the list of repositories by OS that are associated with this - * management pack. - * - * @return the repositories for this mpack by operating system. - */ - public List getRepositoryOperatingSystems() { - return repositoryOperatingSystems; - } - - /** - * Sets the repositories associated with this management pack. - * - * @param repositoryOperatingSystems - * each operating system repo grouping. - */ - public void setRepositoryOperatingSystems(List repositoryOperatingSystems) { - this.repositoryOperatingSystems = repositoryOperatingSystems; - for (RepoOsEntity repositoryOperatingSystem : repositoryOperatingSystems) { - repositoryOperatingSystem.setMpackEntity(this); - repositoryOperatingSystem.setMpackId(id); - } - } - public MpackEntity() { } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackHostStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackHostStateEntity.java deleted file mode 100644 index 2030ddd53d9..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/MpackHostStateEntity.java +++ /dev/null @@ -1,206 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.orm.entities; - -import java.util.Objects; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.GeneratedValue; -import javax.persistence.GenerationType; -import javax.persistence.Id; -import javax.persistence.JoinColumn; -import javax.persistence.ManyToOne; -import javax.persistence.NamedQueries; -import javax.persistence.NamedQuery; -import javax.persistence.Table; -import javax.persistence.TableGenerator; -import javax.persistence.UniqueConstraint; - -import org.apache.ambari.server.state.RepositoryVersionState; - -/** - * The {@link MpackHostStateEntity} is used to track the installation state of a - * management pack for a given host. This allows Ambari to determine if a - * management pack needs re-installation on a host or if a host has the correct - * bits installed already. 
- */ -@Entity -@Table( - name = "mpack_host_state", - uniqueConstraints = @UniqueConstraint( - name = "UQ_mpack_host_state", - columnNames = { "host_id", "mpack_id" })) -@TableGenerator( - name = "mpack_host_state_id_generator", - table = "ambari_sequences", - pkColumnName = "sequence_name", - valueColumnName = "sequence_value", - pkColumnValue = "mpack_host_state_id_seq", - initialValue = 0) -@NamedQueries({ - @NamedQuery( - name = "mpackHostStateForHost", - query = "SELECT mpackHostState FROM MpackHostStateEntity mpackHostState JOIN mpackHostState.hostEntity host " - + "WHERE mpackHostState.hostEntity.hostName=:hostName"), - @NamedQuery( - name = "mpackHostStateForMpack", - query = "SELECT mpackHostState FROM MpackHostStateEntity mpackHostState WHERE mpackHostState.id = :mpackId") }) - -public class MpackHostStateEntity { - - /** - * The primary key ID of this entity. - */ - @Id - @Column(name = "id", nullable = false, insertable = true, updatable = false) - @GeneratedValue(strategy = GenerationType.TABLE, generator = "mpack_host_state_id_generator") - private Long id; - - /** - * The ID of the host associated with an mpack for a given installation state. - */ - @Column(name = "host_id", nullable = false, insertable = false, updatable = false) - private Long hostId; - - /** - * The host associated with an mpack for a given installation state. - */ - @ManyToOne - @JoinColumn(name = "host_id", referencedColumnName = "host_id", nullable = false) - private HostEntity hostEntity; - - /** - * The ID of the host associated with an mpack for a given installation state. - */ - @Column(name = "mpack_id", nullable = false, insertable = false, updatable = false) - private Long mpackId; - - /** - * The host associated with an mpack for a given installation state. - */ - @ManyToOne - @JoinColumn(name = "mpack_id", referencedColumnName = "id", nullable = false) - private MpackEntity mpackEntity; - - /** - * The state of the - */ - @Column(name = "state", nullable = false, insertable = true, updatable = true) - @Enumerated(value = EnumType.STRING) - private RepositoryVersionState state; - - /** - * Constructor. - */ - public MpackHostStateEntity() { - } - - /** - * Constructor. - * - * @param mpackEntity - * @param hostEntity - * @param state - */ - public MpackHostStateEntity(MpackEntity mpackEntity, HostEntity hostEntity, - RepositoryVersionState state) { - this.mpackEntity = mpackEntity; - this.hostEntity = hostEntity; - this.state = state; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getHostId() { - return hostId; - } - - public void setHostId(Long hostId) { - this.hostId = hostId; - } - - public Long getMpackId() { - return mpackId; - } - - public void setMpackId(Long mpackId) { - this.mpackId = mpackId; - } - - public String getHostName() { - return hostEntity != null ? 
hostEntity.getHostName() : null; - } - - public HostEntity getHostEntity() { - return hostEntity; - } - - public void setHostEntity(HostEntity hostEntity) { - this.hostEntity = hostEntity; - } - - public RepositoryVersionState getState() { - return state; - } - - public void setState(RepositoryVersionState state) { - this.state = state; - } - - public MpackEntity getMpack() { - return mpackEntity; - } - - public void setMpack(MpackEntity mpackEntity) { - this.mpackEntity = mpackEntity; - } - - /** - * {@inheritDoc} - */ - @Override - public int hashCode() { - return Objects.hash(id, mpackId, hostId, state); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - - MpackHostStateEntity other = (MpackHostStateEntity) obj; - return Objects.equals(id, other.id) && Objects.equals(mpackId, other.mpackId) - && Objects.equals(hostId, other.hostId) && Objects.equals(state, other.state); - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoDefinitionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoDefinitionEntity.java index f308fd27623..dec922abaa7 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoDefinitionEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoDefinitionEntity.java @@ -35,7 +35,6 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; -import org.apache.ambari.server.state.RepositoryInfo; import org.apache.ambari.server.state.stack.RepoTag; import com.google.common.base.Objects; @@ -210,35 +209,4 @@ public boolean equals(Object object) { && Objects.equal(distribution, that.distribution) && Objects.equal(components, that.components); } - - /** - * {@inheritDoc} - */ - @Override - public String toString() { - return Objects.toStringHelper(this) - .add("id", repoID) - .add("name", repoName) - .add("tags", repoTags) - .toString(); - } - - /** - * Builds a {@link RepoDefinitionEntity} from a {@link RepositoryInfo} instance. - * - * @param repositoryInfo the repository to build from. - * @return a newly created {@link RepoDefinitionEntity} which is not yet persisted. 
- */ - public static RepoDefinitionEntity from(RepositoryInfo repositoryInfo) { - RepoDefinitionEntity repositoryDefinition = new RepoDefinitionEntity(); - repositoryDefinition.setBaseUrl(repositoryInfo.getBaseUrl()); - repositoryDefinition.setRepoName(repositoryInfo.getRepoName()); - repositoryDefinition.setRepoID(repositoryInfo.getRepoId()); - repositoryDefinition.setDistribution(repositoryInfo.getDistribution()); - repositoryDefinition.setComponents(repositoryInfo.getComponents()); - repositoryDefinition.setMirrors(repositoryInfo.getMirrorsList()); - repositoryDefinition.setUnique(repositoryInfo.isUnique()); - repositoryDefinition.setTags(repositoryInfo.getTags()); - return repositoryDefinition; - } - } +} \ No newline at end of file diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoOsEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoOsEntity.java index 9af0ea90df4..e1c4a566e95 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoOsEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepoOsEntity.java @@ -33,9 +33,6 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; - import com.google.common.base.Objects; /** @@ -55,12 +52,6 @@ public class RepoOsEntity { @GeneratedValue(strategy = GenerationType.TABLE, generator = "repo_os_id_generator") private Long id; - /** - * The ID of the mpack that this repository entry belongs to. - */ - @Column(name = "mpack_id", updatable = false, insertable = false) - private long mpackId; - @Column(name = "family") private String family; @@ -76,16 +67,10 @@ public class RepoOsEntity { /** * many-to-one association to {@link RepositoryVersionEntity} */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) @ManyToOne(fetch = FetchType.EAGER) - @JoinColumn(name = "repo_version_id", nullable = true) + @JoinColumn(name = "repo_version_id", nullable = false) private RepositoryVersionEntity repositoryVersionEntity; - @ManyToOne - @JoinColumn(name = "mpack_id", referencedColumnName = "id", nullable = false) - private MpackEntity mpackEntity; - /** * @return repoDefinitionEntities */ @@ -110,13 +95,14 @@ public void addRepoDefinitionEntities(List repoDefinitionE * @param repoDefinition many-to-one entity */ public void addRepoDefinition(RepoDefinitionEntity repoDefinition) { - repoDefinitionEntities.add(repoDefinition); + this.repoDefinitionEntities.add(repoDefinition); repoDefinition.setRepoOs(this); } + public RepositoryVersionEntity getRepositoryVersionEntity() { + return repositoryVersionEntity; + } - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setRepositoryVersionEntity(RepositoryVersionEntity repositoryVersionEntity) { this.repositoryVersionEntity = repositoryVersionEntity; } @@ -129,25 +115,6 @@ public void setId(Long id) { this.id = id; } - /** - * Gets the management pack ID. - * - * @return the management pack ID. - */ - public Long getMpackId() { - return mpackId; - } - - /** - * Sets the management pack ID. - * - * @param mpackId - * the management pack ID. - */ - public void setMpackId(Long mpackId) { - this.mpackId = mpackId; - } - public String getFamily() { return family; } @@ -164,32 +131,12 @@ public void setAmbariManaged(boolean ambariManaged) { this.ambariManaged = (short) (ambariManaged ? 
1 : 0); } - /** - * Gets the Mpack which is associated with this repository operating system. - * - * @return the Mpack - */ - public MpackEntity getMpackEntity() { - return mpackEntity; - } - - /** - * Sets the Mpack which is associated with this repository operating system. - * - * @param mpackEntity - * the Mpack - */ - public void setMpackEntity(MpackEntity mpackEntity) { - this.mpackEntity = mpackEntity; - } - /** * {@inheritDoc} */ @Override public int hashCode() { - return java.util.Objects.hash(mpackId, mpackEntity, family, ambariManaged, - repoDefinitionEntities); + return java.util.Objects.hash(family, ambariManaged, repoDefinitionEntities); } /** @@ -210,22 +157,8 @@ public boolean equals(Object object) { } RepoOsEntity that = (RepoOsEntity) object; - return Objects.equal(mpackId, that.mpackId) - && Objects.equal(mpackEntity, that.mpackEntity) - && Objects.equal(ambariManaged, that.ambariManaged) + return Objects.equal(ambariManaged, that.ambariManaged) && Objects.equal(family, that.family) && Objects.equal(repoDefinitionEntities, that.repoDefinitionEntities); } - - /** - * {@inheritDoc} - */ - @Override - public String toString() { - return Objects.toStringHelper(this) - .add("mpackId", mpackId) - .add("family", family) - .add("isManagedByAmbari", ambariManaged) - .toString(); - } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java index 832861abec1..18ac2c2c3d7 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java @@ -92,8 +92,6 @@ name = "findByServiceDesiredVersion", query = "SELECT repositoryVersion FROM RepositoryVersionEntity repositoryVersion WHERE repositoryVersion IN (SELECT DISTINCT sd1.desiredRepositoryVersion FROM ServiceDesiredStateEntity sd1 WHERE sd1.desiredRepositoryVersion IN ?1)") }) @StaticallyInject -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class RepositoryVersionEntity { @Inject private static Provider repositoryVersionHelperProvider; @@ -119,7 +117,7 @@ public class RepositoryVersionEntity { /** * one-to-many association to {@link RepoOsEntity} */ - @OneToMany(fetch = FetchType.EAGER, mappedBy = "repositoryVersionEntity", orphanRemoval = true) + @OneToMany(fetch = FetchType.EAGER, cascade = CascadeType.ALL, mappedBy = "repositoryVersionEntity", orphanRemoval = true) private List repoOsEntities = new ArrayList<>(); @OneToMany(cascade = CascadeType.REMOVE, mappedBy = "repositoryVersion") @@ -363,7 +361,7 @@ public VersionDefinitionXml getRepositoryXml() throws Exception { */ @Override public int hashCode() { - return java.util.Objects.hash(stack, version, displayName); + return java.util.Objects.hash(stack, version, displayName, repoOsEntities); } /** @@ -385,7 +383,8 @@ public boolean equals(Object object) { RepositoryVersionEntity that = (RepositoryVersionEntity) object; return Objects.equal(stack, that.stack) && Objects.equal(version, that.version) - && Objects.equal(displayName, that.displayName); + && Objects.equal(displayName, that.displayName) + && Objects.equal(repoOsEntities, that.repoOsEntities); } /** diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java index 6ee28f1512f..8a8b3809758 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java @@ -18,8 +18,10 @@ package org.apache.ambari.server.orm.entities; +import java.util.ArrayList; import java.util.Collection; +import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -38,8 +40,6 @@ import javax.persistence.TableGenerator; import javax.persistence.UniqueConstraint; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.state.RepositoryVersionState; import org.apache.ambari.server.state.State; @@ -62,13 +62,7 @@ query = "SELECT scds FROM ServiceComponentDesiredStateEntity scds WHERE scds.clusterId = :clusterId " + "AND scds.serviceGroupId = :serviceGroupId " + "AND scds.serviceId = :serviceId " + - "AND scds.componentName = :componentName " + - "AND scds.componentType = :componentType" ), - @NamedQuery( - name = "ServiceComponentDesiredStateEntity.findById", - query = "SELECT scds FROM ServiceComponentDesiredStateEntity scds WHERE scds.id = :id" ) -}) - + "AND scds.componentName = :componentName") }) public class ServiceComponentDesiredStateEntity { @Id @@ -81,9 +75,6 @@ public class ServiceComponentDesiredStateEntity { @Column(name = "component_name", nullable = false, insertable = true, updatable = true) private String componentName; - @Column(name = "component_type", nullable = false, insertable = true, updatable = true) - private String componentType; - @Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10) private Long clusterId; @@ -100,8 +91,6 @@ public class ServiceComponentDesiredStateEntity { @Column(name = "recovery_enabled", nullable = false, insertable = true, updatable = true) private Integer recoveryEnabled = 0; - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) @Column(name = "repo_state", nullable = false, insertable = true, updatable = true) @Enumerated(EnumType.STRING) private RepositoryVersionState repoState = RepositoryVersionState.NOT_REQUIRED; @@ -109,8 +98,6 @@ public class ServiceComponentDesiredStateEntity { /** * Unidirectional one-to-one association to {@link RepositoryVersionEntity} */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) @OneToOne @JoinColumn( name = "desired_repo_version_id", @@ -135,6 +122,11 @@ public class ServiceComponentDesiredStateEntity { @OneToMany(mappedBy = "serviceComponentDesiredStateEntity") private Collection hostComponentDesiredStateEntities; + @OneToMany( + mappedBy = "m_serviceComponentDesiredStateEntity", + cascade = {CascadeType.ALL}) + private Collection serviceComponentVersions; + public Long getId() { return id; } @@ -163,14 +155,6 @@ public void setComponentName(String componentName) { this.componentName = componentName; } - public String getComponentType() { - return componentType; - } - - public void setComponentType(String componentType) { - this.componentType = componentType; - } - public State getDesiredState() { return desiredState; } @@ -179,30 +163,42 @@ public void setDesiredState(State desiredState) { this.desiredState = desiredState; } - @Deprecated - 
@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public RepositoryVersionEntity getDesiredRepositoryVersion() { return desiredRepositoryVersion; } - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setDesiredRepositoryVersion(RepositoryVersionEntity desiredRepositoryVersion) { this.desiredRepositoryVersion = desiredRepositoryVersion; } - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public StackEntity getDesiredStack() { return desiredRepositoryVersion.getStack(); } - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public String getDesiredVersion() { return desiredRepositoryVersion.getVersion(); } + /** + * @param versionEntity the version to add + */ + public void addVersion(ServiceComponentVersionEntity versionEntity) { + if (null == serviceComponentVersions) { + serviceComponentVersions = new ArrayList<>(); + } + + serviceComponentVersions.add(versionEntity); + versionEntity.setServiceComponentDesiredState(this); + } + + /** + * @return the collection of versions for the component + */ + public Collection getVersions() { + return serviceComponentVersions; + } + + public boolean isRecoveryEnabled() { return recoveryEnabled != 0; } @@ -241,9 +237,6 @@ public boolean equals(Object o) { if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) { return false; } - if (componentType != null ? !componentType.equals(that.componentType) : that.componentType != null) { - return false; - } if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) { return false; } @@ -261,7 +254,6 @@ public int hashCode() { result = 31 * result + (serviceGroupId != null ? serviceGroupId.hashCode() : 0); result = 31 * result + (serviceId != null ? serviceId.hashCode() : 0); result = 31 * result + (componentName != null ? componentName.hashCode() : 0); - result = 31 * result + (componentType != null ? componentType.hashCode() : 0); result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0); result = 31 * result + (desiredRepositoryVersion != null ? 
desiredRepositoryVersion.hashCode() : 0); @@ -295,8 +287,6 @@ public void setHostComponentDesiredStateEntities(Collection topologyHostGroupEntities; @@ -145,20 +141,6 @@ public void setDescription(String description) { this.description = description; } - /** - * @return the raw request body in JSON - */ - public String getRawRequestBody() { - return rawRequestBody; - } - - /** - * @param rawRequestBody the raw request body in JSON - */ - public void setRawRequestBody(String rawRequestBody) { - this.rawRequestBody = rawRequestBody; - } - public Collection getTopologyHostGroupEntities() { return topologyHostGroupEntities; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java index 7558affc65f..046ab11fb18 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeGroupEntity.java @@ -23,8 +23,6 @@ import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; @@ -34,8 +32,6 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; - /** * Models a single upgrade group as part of an entire {@link UpgradeEntity}. *

@@ -61,12 +57,8 @@ public class UpgradeGroupEntity { @Column(name = "upgrade_id", nullable = false, insertable = false, updatable = false) private Long upgradeId; - @Column(name = "lifecycle", length=255, nullable = false) - @Enumerated(value = EnumType.STRING) - private LifecycleType lifecycle; - @Basic - @Column(name = "group_name", length=255, nullable = true) + @Column(name = "group_name", length=255, nullable = false) private String groupName; @Basic @@ -144,20 +136,6 @@ public void setItems(List items) { upgradeItems = items; } - /** - * @return the lifecycle for the group - */ - public LifecycleType getLifecycle() { - return lifecycle; - } - - /** - * @param type the lifecycle for the group - */ - public void setLifecycle(LifecycleType type) { - lifecycle = type; - } - /** * {@inheritDoc} */ diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/RepoUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/RepoUtil.java index 4adedc48aa2..6a8ece2569c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/stack/RepoUtil.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/RepoUtil.java @@ -19,7 +19,6 @@ import java.io.File; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; @@ -56,20 +55,16 @@ public class RepoUtil { */ private final static Logger LOG = LoggerFactory.getLogger(RepoUtil.class); - /** - * Used to unmarshal XML stack files, such as {@code repoinfo.xml}. - */ - private static final ModuleFileUnmarshaller m_unmarshaller = new ModuleFileUnmarshaller(); /** * repository directory name */ - public final static String REPOSITORY_FOLDER_NAME = "repos"; + final static String REPOSITORY_FOLDER_NAME = "repos"; /** * repository file name */ - public final static String REPOSITORY_FILE_NAME = "repoinfo.xml"; + final static String REPOSITORY_FILE_NAME = "repoinfo.xml"; private static final Function REPO_ENTITY_TO_NAME = new Function() { @Override @@ -78,19 +73,6 @@ public String apply(@Nullable RepoDefinitionEntity input) { } }; - /** - * Gets the repository XML as an unmarshalled object. - * - * @param directory - * the root stack directory. - * @return the repository XML or {@code null}. - */ - public static RepositoryXml getRepositoryXml(File directory) { - RepositoryFolderAndXml repositoryFolderAndXml = parseRepoFile(directory, - Arrays.asList(directory.list()), m_unmarshaller); - - return repositoryFolderAndXml.repoXml.orNull(); - } /** * Parses the repository file for a stack/service if exists. 
@@ -144,12 +126,11 @@ public static boolean addServiceReposToOperatingSystemEntities(List serviceReposForOs = stackReposByOs.get(os.getFamily()); ImmutableSet repoNames = ImmutableSet.copyOf(Lists.transform(os.getRepoDefinitionEntities(), REPO_ENTITY_TO_NAME)); - for (RepositoryInfo repoInfo : serviceReposForOs) { + for (RepositoryInfo repoInfo : serviceReposForOs) if (!repoNames.contains(repoInfo.getRepoName())) { os.addRepoDefinition(toRepositoryEntity(repoInfo)); addedRepos.add(String.format("%s (%s)", repoInfo.getRepoId(), os.getFamily())); } - } } LOG.info("Added {} service repos: {}", addedRepos.size(),Iterables.toString(addedRepos)); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java index 496a845f0d5..baaf8f0f7db 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java @@ -132,6 +132,11 @@ public boolean accept(File dir, String s) { */ private final static String REPOSITORY_FOLDER_NAME = "repos"; + /** + * repository file name + */ + private final static String REPOSITORY_FILE_NAME = "repoinfo.xml"; + /** * metainfo file name */ diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java index 2258bbf6466..f576a34656a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java @@ -1156,6 +1156,7 @@ private UpgradePack parseServiceUpgradePack(UpgradePack parent, File serviceFile * @return the child upgrade pack */ private UpgradePack parseServiceUpgradePack(UpgradePack parent, UpgradePack child) { + parent.mergePrerequisiteChecks(child); parent.mergeProcessing(child); return child; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java new file mode 100644 index 00000000000..334136f6a48 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartup.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ambari.server.stack; + +import java.util.List; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.orm.dao.ClusterDAO; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; +import org.apache.ambari.server.orm.entities.ClusterEntity; +import org.apache.ambari.server.orm.entities.ClusterServiceEntity; +import org.apache.ambari.server.orm.entities.RepoOsEntity; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.RepositoryInfo; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.StackInfo; +import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ListMultimap; +import com.google.inject.Inject; +import com.google.inject.persist.Transactional; + + +/** + * This class should be instantiated on server startup and its {@link #process()} method invoked. + * The class is part of management pack support. Management packs can contain services which define + * their own (yum/apt/etc.) repositories. If a management pack is installed on an Ambari instance with an existing + * cluster, the cluster's repository version entity must be updated with the custom repos provided by the + * management pack. This class takes care of that. + */ +public class UpdateActiveRepoVersionOnStartup { + + private static final Logger LOG = LoggerFactory.getLogger(UpdateActiveRepoVersionOnStartup.class); + + ClusterDAO clusterDao; + RepositoryVersionDAO repositoryVersionDao; + RepositoryVersionHelper repositoryVersionHelper; + StackManager stackManager; + + @Inject + public UpdateActiveRepoVersionOnStartup(ClusterDAO clusterDao, + RepositoryVersionDAO repositoryVersionDao, + RepositoryVersionHelper repositoryVersionHelper, + AmbariMetaInfo metaInfo) { + this.clusterDao = clusterDao; + this.repositoryVersionDao = repositoryVersionDao; + this.repositoryVersionHelper = repositoryVersionHelper; + this.stackManager = metaInfo.getStackManager(); + } + + /** + * Updates the active {@link RepositoryVersionEntity} for clusters with add-on services defined in management packs.
+ * @throws AmbariException + */ + @Transactional + public void process() throws AmbariException { + LOG.info("Updating existing repo versions with service repos."); + + try { + + List clusters = clusterDao.findAll(); + for (ClusterEntity cluster: clusters) { + for (ClusterServiceEntity service : cluster.getClusterServiceEntities()) { + RepositoryVersionEntity repositoryVersion = service.getServiceDesiredStateEntity().getDesiredRepositoryVersion(); + + StackId stackId = repositoryVersion.getStackId(); + StackInfo stack = stackManager.getStack(stackId.getStackName(), stackId.getStackVersion()); + + if (updateRepoVersion(stack, repositoryVersion)) { + repositoryVersionDao.merge(repositoryVersion); + } + } + } + } + catch(Exception ex) { + throw new AmbariException( + "An error occurred while updating current repository versions with stack repositories.", + ex); + } + } + + private boolean updateRepoVersion(StackInfo stackInfo, RepositoryVersionEntity repoVersion) throws Exception { + ListMultimap serviceReposByOs = stackInfo.getRepositoriesByOs(); + + List operatingSystems = repoVersion.getRepoOsEntities(); + boolean changed = RepoUtil.addServiceReposToOperatingSystemEntities(operatingSystems, serviceReposByOs); + if (changed) { + repoVersion.addRepoOsEntities(operatingSystems); + } + return changed; + } + +} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java index c708ec1565c..79d5844c3dc 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java @@ -161,22 +161,6 @@ Service addDependencyToService(String serviceGroupName, String serviceName, */ Service getServiceByComponentName(String componentName) throws AmbariException; - /** - * Gets a service from the given component Id.
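Taken together, process() and updateRepoVersion() walk every cluster service, resolve its desired repository version, and splice in any service-defined repos that RepoUtil.addServiceReposToOperatingSystemEntities (at the top of this diff) finds missing. That merge step is essentially a per-OS set difference on repo names. Below is a minimal standalone sketch of the idea, using simplified stand-in types (Repo, OsRepos) rather than the real Ambari entities:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative stand-ins for Ambari's RepositoryInfo / RepoOsEntity; not the real types.
final class Repo {
  final String name;
  Repo(String name) { this.name = name; }
}

final class OsRepos {
  final String family;
  final List<Repo> repos = new ArrayList<>();
  OsRepos(String family) { this.family = family; }
}

public class ServiceRepoMerge {

  /** Adds service repos whose names are not already present for an OS; returns true if anything changed. */
  static boolean addServiceRepos(List<OsRepos> operatingSystems, Map<String, List<Repo>> serviceReposByOs) {
    boolean changed = false;
    for (OsRepos os : operatingSystems) {
      Set<String> knownNames = new HashSet<>();
      for (Repo repo : os.repos) {
        knownNames.add(repo.name);
      }
      for (Repo candidate : serviceReposByOs.getOrDefault(os.family, Collections.emptyList())) {
        // Set.add returns false when the name was already known, so duplicates are skipped.
        if (knownNames.add(candidate.name)) {
          os.repos.add(candidate);
          changed = true;
        }
      }
    }
    return changed;
  }

  public static void main(String[] args) {
    OsRepos redhat7 = new OsRepos("redhat7");
    redhat7.repos.add(new Repo("HDP"));
    Map<String, List<Repo>> serviceRepos =
        Map.of("redhat7", List.of(new Repo("HDP"), new Repo("MY-SERVICE-REPO")));
    // Prints true: MY-SERVICE-REPO is new, HDP is already present and skipped.
    System.out.println(addServiceRepos(List.of(redhat7), serviceRepos));
  }
}
```

Note that the real implementation only calls RepositoryVersionDAO.merge when the merge reports a change, which avoids needless writes on every startup.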
- * - * @param componentId - * @return - * @throws AmbariException - */ - - Service getServiceByComponentId(Long componentId) throws AmbariException; - - Long getComponentId(String componentName) throws AmbariException; - - String getComponentName(Long componentId) throws AmbariException; - - String getComponentType(Long componentId) throws AmbariException; - /** * Get all services * diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Module.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Module.java index 29366d9d842..800a9e01f4a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/Module.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Module.java @@ -20,8 +20,6 @@ import java.util.List; import java.util.Objects; -import org.apache.commons.lang3.StringUtils; - import com.google.gson.annotations.SerializedName; public class Module { @@ -129,24 +127,11 @@ public void setComponents(List components) { this.components = components; } - public ModuleComponent getModuleComponent(String moduleComponentName) { - for (ModuleComponent moduleComponent : components) { - if (StringUtils.equals(moduleComponentName, moduleComponent.getName())) { - return moduleComponent; - } - } - - return null; - } @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; Module module = (Module) o; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Mpack.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Mpack.java index 93b9a2d6949..76e43d4aab6 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/Mpack.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Mpack.java @@ -19,12 +19,6 @@ import java.util.List; import java.util.Map; -import java.util.Objects; - -import org.apache.ambari.server.stack.RepoUtil; -import org.apache.ambari.server.state.stack.RepositoryXml; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.builder.EqualsBuilder; import com.google.gson.annotations.SerializedName; @@ -67,11 +61,6 @@ public class Mpack { private String mpackUri; - /** - * The {@link RepoUtil#REPOSITORY_FILE_NAME} representation. - */ - private RepositoryXml repositoryXml; - public Long getResourceId() { return resourceId; } @@ -153,98 +142,38 @@ public void setDefinition(String definition) { this.definition = definition; } - /** - * Gets the repository XML representation. - * - * @return the {@link RepoUtil#REPOSITORY_FILE_NAME} unmarshalled. - */ - public RepositoryXml getRepositoryXml() { - return repositoryXml; - } - - /** - * Gets the repository XML representation. - * - * @param repositoryXml - * the {@link RepoUtil#REPOSITORY_FILE_NAME} unmarshalled. - */ - public void setRepositoryXml(RepositoryXml repositoryXml) { - this.repositoryXml = repositoryXml; - } - - /** - * Gets the module with the given name. Module names are service names. - * - * @param moduleName - * the name of the module. - * @return the module or {@code null}. - */ - public Module getModule(String moduleName) { - for (Module module : modules) { - if (StringUtils.equals(moduleName, module.getName())) { - return module; - } - } - - return null; - } - - /** - * Gets a component from a given module. - * - * @param moduleName - * the module (service) name. 
- * @param moduleComponentName - * the name of the component. - * @return the component or {@code null}. - */ - public ModuleComponent getModuleComponent(String moduleName, String moduleComponentName) { - for (Module module : modules) { - ModuleComponent moduleComponent = module.getModuleComponent(moduleComponentName); - if (null != moduleComponent) { - return moduleComponent; - } - } - - return null; - } - - /** - * {@inheritDoc} - */ @Override public boolean equals(Object o) { - if (this == o) { - return true; - } + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - if (o == null || getClass() != o.getClass()) { - return false; - } + Mpack mpack = (Mpack) o; - Mpack that = (Mpack) o; - EqualsBuilder equalsBuilder = new EqualsBuilder(); - equalsBuilder.append(resourceId, that.resourceId); - equalsBuilder.append(registryId, that.registryId); - equalsBuilder.append(mpackId, that.mpackId); - equalsBuilder.append(name, that.name); - equalsBuilder.append(version, that.version); - equalsBuilder.append(prerequisites, that.prerequisites); - equalsBuilder.append(modules, that.modules); - equalsBuilder.append(definition, that.definition); - equalsBuilder.append(description, that.description); - equalsBuilder.append(mpackUri, that.mpackUri); - - return equalsBuilder.isEquals(); + if (!resourceId.equals(mpack.resourceId)) return false; + if (registryId != null ? !registryId.equals(mpack.registryId) : mpack.registryId != null) return false; + if (!mpackId.equals(mpack.mpackId)) return false; + if (!name.equals(mpack.name)) return false; + if (!version.equals(mpack.version)) return false; + if (!prerequisites.equals(mpack.prerequisites)) return false; + if (!modules.equals(mpack.modules)) return false; + if (!definition.equals(mpack.definition)) return false; + if (!description.equals(mpack.description)) return false; + return mpackUri.equals(mpack.mpackUri); } - /** - * {@inheritDoc} - */ @Override public int hashCode() { - return Objects.hash(resourceId, registryId, mpackId, name, version, prerequisites, modules, - definition, description, mpackUri); + int result = resourceId.hashCode(); + result = 31 * result + (registryId != null ? 
registryId.hashCode() : 0); + result = 31 * result + mpackId.hashCode(); + result = 31 * result + name.hashCode(); + result = 31 * result + version.hashCode(); + result = 31 * result + prerequisites.hashCode(); + result = 31 * result + modules.hashCode(); + result = 31 * result + definition.hashCode(); + result = 31 * result + description.hashCode(); + result = 31 * result + mpackUri.hashCode(); + return result; } @Override @@ -264,32 +193,32 @@ public String toString() { } public void copyFrom(Mpack mpack) { - if (resourceId == null) { - resourceId = mpack.getResourceId(); + if (this.resourceId == null) { + this.resourceId = mpack.getResourceId(); } - if (name == null) { - name = mpack.getName(); + if (this.name == null) { + this.name = mpack.getName(); } - if (mpackId == null) { - mpackId = mpack.getMpackId(); + if (this.mpackId == null) { + this.mpackId = mpack.getMpackId(); } - if (version == null) { - version = mpack.getVersion(); + if (this.version == null) { + this.version = mpack.getVersion(); } - if (registryId == null) { - registryId = mpack.getRegistryId(); + if (this.registryId == null) { + this.registryId = mpack.getRegistryId(); } - if (description == null) { - description = mpack.getDescription(); + if (this.description == null) { + this.description = mpack.getDescription(); } - if (modules == null) { - modules = mpack.getModules(); + if (this.modules == null) { + this.modules = mpack.getModules(); } - if (prerequisites == null) { - prerequisites = mpack.getPrerequisites(); + if (this.prerequisites == null) { + this.prerequisites = mpack.getPrerequisites(); } - if (definition == null) { - definition = mpack.getDefinition(); + if (this.definition == null) { + this.definition = mpack.getDefinition(); } } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java index 016853e8768..a7bbc1b16b1 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java @@ -23,60 +23,23 @@ import org.apache.ambari.server.controller.RepositoryResponse; import org.apache.ambari.server.state.stack.RepoTag; -import org.codehaus.jackson.annotate.JsonIgnore; -import org.codehaus.jackson.annotate.JsonProperty; import com.google.common.base.Function; import com.google.common.base.Objects; import com.google.common.base.Strings; -import com.google.gson.annotations.SerializedName; public class RepositoryInfo { - @JsonProperty("base_url") - @SerializedName("base_url") private String baseUrl; - - @JsonProperty("os_type") - @SerializedName("os_type") private String osType; - - @JsonProperty("repo_id") - @SerializedName("repo_id") private String repoId; - - @JsonProperty("repo_name") - @SerializedName("repo_name") private String repoName; - - @JsonProperty("distribution") - @SerializedName("distribution") private String distribution; - - @JsonProperty("components") - @SerializedName("components") private String components; - - @JsonProperty("mirrors_list") - @SerializedName("mirrors_list") private String mirrorsList; - - @JsonProperty("default_base_url") - @SerializedName("default_base_url") private String defaultBaseUrl; - - @JsonIgnore private boolean repoSaved = false; - - @JsonProperty("unique") - @SerializedName("unique") private boolean unique = false; - - @JsonProperty("ambari_managed") - @SerializedName("ambari_managed") private boolean ambariManagedRepositories = true; - - 
@JsonProperty("tags") - @SerializedName("tags") private Set tags = new HashSet<>(); /** @@ -224,12 +187,8 @@ public String toString() { @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; RepositoryInfo that = (RepositoryInfo) o; return repoSaved == that.repoSaved && unique == that.unique && diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java index 000ab361f9b..0ba80cc8046 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java @@ -22,8 +22,6 @@ import java.util.Map; import java.util.Set; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.api.services.ServiceKey; import org.apache.ambari.server.controller.ServiceDependencyResponse; @@ -74,7 +72,7 @@ void addServiceComponent(ServiceComponent component) void debugDump(StringBuilder sb); - ServiceComponent addServiceComponent(String serviceComponentName, String serviceComponentType) + ServiceComponent addServiceComponent(String serviceComponentName) throws AmbariException; /** @@ -148,23 +146,17 @@ void deleteServiceComponent(String componentName) /** * @return */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) RepositoryVersionEntity getDesiredRepositoryVersion(); /** * @param desiredRepositoryVersion */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) void setDesiredRepositoryVersion(RepositoryVersionEntity desiredRepositoryVersion); /** * Gets the repository for the desired version of this service by consulting * the repository states of all known components. */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) RepositoryVersionState getRepositoryState(); enum Type { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java index 6dab235f93e..3c44fdf971f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java @@ -20,8 +20,6 @@ import java.util.Map; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.controller.ServiceComponentResponse; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; @@ -30,10 +28,6 @@ public interface ServiceComponent { String getName(); - String getType(); - - Long getId(); - /** * Get a true or false value specifying * if auto start was enabled for this component. 
@@ -69,20 +63,12 @@ public interface ServiceComponent { * * @return */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) RepositoryVersionEntity getDesiredRepositoryVersion(); - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) StackId getDesiredStackId(); - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) String getDesiredVersion(); - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity); /** @@ -124,10 +110,18 @@ ServiceComponentHost addServiceComponentHost( void delete() throws AmbariException; + /** + * This method computes the state of the repository that's associated with the desired + * version. It is used, for example, when a host component reports its version and the + * state can be in flux. + * + * @param reportedVersion + * @throws AmbariException + */ + void updateRepositoryState(String reportedVersion) throws AmbariException; + /** * @return the repository state for the desired version */ - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) RepositoryVersionState getRepositoryState(); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java index 4c36e7c586f..9c692d6f36e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java @@ -19,12 +19,9 @@ import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; -import com.google.inject.assistedinject.Assisted; - public interface ServiceComponentFactory { - ServiceComponent createNew(Service service, @Assisted("componentName") String componentName, - @Assisted("componentType") String componentType); + ServiceComponent createNew(Service service, String componentName); ServiceComponent createExisting(Service service, ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java index 46c704fb138..54c378d51a5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java @@ -71,23 +71,11 @@ public interface ServiceComponentHost { String getServiceType(); /** - * Get the ServiceComponent's Id this object maps to - * @return Id of the ServiceComponent - */ - public Long getServiceComponentId(); - - /** - * Get the ServiceComponent's Name this object maps to + * Get the ServiceComponent this object maps to * @return Name of the ServiceComponent */ String getServiceComponentName(); - /** - * Get the ServiceComponent's Type this object maps to - * @return Type of the ServiceComponent - */ - String getServiceComponentType(); - /** * Get the Host this object maps to * @return Host's hostname diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java index 77634a0ecbe..b4b4ea1c3fb 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java +++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java @@ -19,6 +19,7 @@ package org.apache.ambari.server.state; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; @@ -26,8 +27,6 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ObjectNotFoundException; import org.apache.ambari.server.ServiceComponentHostNotFoundException; @@ -36,6 +35,7 @@ import org.apache.ambari.server.controller.MaintenanceStateHelper; import org.apache.ambari.server.controller.ServiceComponentResponse; import org.apache.ambari.server.events.ServiceComponentRecoveryChangedEvent; +import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener; import org.apache.ambari.server.events.publishers.AmbariEventPublisher; import org.apache.ambari.server.orm.dao.ClusterServiceDAO; import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO; @@ -48,11 +48,16 @@ import org.apache.ambari.server.orm.entities.HostComponentStateEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; +import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity; import org.apache.ambari.server.orm.entities.StackEntity; import org.apache.ambari.server.state.cluster.ClusterImpl; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Function; +import com.google.common.collect.Maps; import com.google.inject.Inject; import com.google.inject.ProvisionException; import com.google.inject.assistedinject.Assisted; @@ -66,7 +71,6 @@ public class ServiceComponentImpl implements ServiceComponent { private final Service service; private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); private final String componentName; - private final String componentType; private String displayName; private boolean isClientComponent; private boolean isMasterComponent; @@ -99,8 +103,8 @@ public class ServiceComponentImpl implements ServiceComponent { private MaintenanceStateHelper maintenanceStateHelper; @AssistedInject - public ServiceComponentImpl(@Assisted Service service, @Assisted("componentName") String componentName, - @Assisted("componentType") String componentType, AmbariMetaInfo ambariMetaInfo, + public ServiceComponentImpl(@Assisted Service service, @Assisted String componentName, + AmbariMetaInfo ambariMetaInfo, ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO, ClusterServiceDAO clusterServiceDAO, ServiceComponentHostFactory serviceComponentHostFactory, AmbariEventPublisher eventPublisher) @@ -109,7 +113,6 @@ public ServiceComponentImpl(@Assisted Service service, @Assisted("componentName" this.ambariMetaInfo = ambariMetaInfo; this.service = service; this.componentName = componentName; - this.componentType = componentType; this.serviceComponentDesiredStateDAO = serviceComponentDesiredStateDAO; this.clusterServiceDAO = clusterServiceDAO; this.serviceComponentHostFactory = serviceComponentHostFactory; @@ -117,7 +120,6 @@ public ServiceComponentImpl(@Assisted Service service, 
@Assisted("componentName" ServiceComponentDesiredStateEntity desiredStateEntity = new ServiceComponentDesiredStateEntity(); desiredStateEntity.setComponentName(componentName); - desiredStateEntity.setComponentType(componentType); desiredStateEntity.setDesiredState(State.INIT); desiredStateEntity.setServiceGroupId(service.getServiceGroupId()); desiredStateEntity.setServiceId(service.getServiceId()); @@ -136,7 +138,7 @@ public void updateComponentInfo() throws AmbariException { StackId stackId = service.getDesiredStackId(); try { ComponentInfo compInfo = ambariMetaInfo.getComponent(stackId.getStackName(), - stackId.getStackVersion(), service.getServiceType(), componentType); + stackId.getStackVersion(), service.getServiceType(), componentName); isClientComponent = compInfo.isClient(); isMasterComponent = compInfo.isMaster(); isVersionAdvertised = compInfo.isVersionAdvertised(); @@ -147,7 +149,6 @@ public void updateComponentInfo() throws AmbariException { + ", clusterName=" + service.getCluster().getClusterName() + ", serviceName=" + service.getServiceType() + ", componentName=" + componentName - + ", componentType=" + componentType + ", stackInfo=" + stackId.getStackId()); } } @@ -171,14 +172,18 @@ public ServiceComponentImpl(@Assisted Service service, desiredStateEntityId = serviceComponentDesiredStateEntity.getId(); componentName = serviceComponentDesiredStateEntity.getComponentName(); - componentType = serviceComponentDesiredStateEntity.getComponentType(); updateComponentInfo(); for (HostComponentStateEntity hostComponentStateEntity : serviceComponentDesiredStateEntity.getHostComponentStateEntities()) { HostComponentDesiredStateEntity hostComponentDesiredStateEntity = hostComponentDesiredStateDAO.findByIndex( - hostComponentStateEntity.getHostComponentDesiredStateId()); + hostComponentStateEntity.getClusterId(), + hostComponentStateEntity.getServiceGroupId(), + hostComponentStateEntity.getServiceId(), + hostComponentStateEntity.getComponentName(), + hostComponentStateEntity.getHostId() + ); try { hostComponents.put(hostComponentStateEntity.getHostName(), @@ -186,11 +191,9 @@ public ServiceComponentImpl(@Assisted Service service, hostComponentStateEntity, hostComponentDesiredStateEntity)); } catch(ProvisionException ex) { StackId currentStackId = getDesiredStackId(); - LOG.error(String.format("Cannot get host component info: stackName=%s, stackVersion=%s, serviceName=%s, componentName=%s, " + - "componentType=%s, hostname=%s", - currentStackId.getStackName(), currentStackId.getStackVersion(), service.getName(), - serviceComponentDesiredStateEntity.getComponentName(), serviceComponentDesiredStateEntity.getComponentType(), - hostComponentStateEntity.getHostName())); + LOG.error(String.format("Can not get host component info: stackName=%s, stackVersion=%s, serviceName=%s, componentName=%s, hostname=%s", + currentStackId.getStackName(), currentStackId.getStackVersion(), + service.getName(),serviceComponentDesiredStateEntity.getComponentName(), hostComponentStateEntity.getHostName())); ex.printStackTrace(); } } @@ -201,16 +204,6 @@ public String getName() { return componentName; } - @Override - public String getType() { - return componentType; - } - - @Override - public Long getId() { - return desiredStateEntityId; - } - /** * Get the recoveryEnabled value. 
* @@ -226,7 +219,7 @@ public boolean isRecoveryEnabled() { } else { LOG.warn("Trying to fetch a member from an entity object that may " + "have been previously deleted, serviceName = " + service.getName() + ", " + - "componentName = " + componentName + ", componentType = " + componentType); + "componentName = " + componentName); } return false; } @@ -239,8 +232,8 @@ public boolean isRecoveryEnabled() { @Override public void setRecoveryEnabled(boolean recoveryEnabled) { if (LOG.isDebugEnabled()) { - LOG.debug("Setting RecoveryEnabled of Component, clusterName={}, clusterId={}, serviceName={}, componentName={}, componentType={}, oldRecoveryEnabled={}, newRecoveryEnabled={}", - service.getCluster().getClusterName(), service.getCluster().getClusterId(), service.getName(), getName(), getType(), isRecoveryEnabled(), recoveryEnabled); + LOG.debug("Setting RecoveryEnabled of Component, clusterName={}, clusterId={}, serviceName={}, componentName={}, oldRecoveryEnabled={}, newRecoveryEnabled={}", + service.getCluster().getClusterName(), service.getCluster().getClusterId(), service.getName(), getName(), isRecoveryEnabled(), recoveryEnabled); } ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById( @@ -353,7 +346,7 @@ public State getDesiredState() { } else { LOG.warn("Trying to fetch a member from an entity object that may " + "have been previously deleted, serviceName = " + getServiceName() + ", " + - "componentName = " + componentName + ", componentType = " + componentType); + "componentName = " + componentName); } return null; @@ -400,8 +393,6 @@ public StackId getDesiredStackId() { * {@inheritDoc} */ @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity) { ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById( desiredStateEntityId); @@ -419,8 +410,6 @@ public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersio * {@inheritDoc} */ @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public RepositoryVersionEntity getDesiredRepositoryVersion() { ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById( desiredStateEntityId); @@ -429,8 +418,6 @@ public RepositoryVersionEntity getDesiredRepositoryVersion() { } @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public String getDesiredVersion() { ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById( desiredStateEntityId); @@ -442,23 +429,18 @@ public String getDesiredVersion() { public ServiceComponentResponse convertToResponse() { Cluster cluster = service.getCluster(); ServiceGroup sg = null; + RepositoryVersionEntity repositoryVersionEntity = getDesiredRepositoryVersion(); + StackId desiredStackId = repositoryVersionEntity.getStackId(); try { sg = cluster.getServiceGroup(service.getServiceGroupId()); } catch (ServiceGroupNotFoundException e) { LOG.warn("Service Group " + service.getServiceGroupId() + " not found"); } - - String serviceName = service.getName(); - String componentName = getName(); - - Mpack mpack = ambariMetaInfo.getMpack(sg.getMpackId()); - ModuleComponent moduleComponent = mpack.getModuleComponent(serviceName, componentName); - ServiceComponentResponse r = new ServiceComponentResponse(getClusterId(), cluster.getClusterName(), 
sg.getServiceGroupId(), sg.getServiceGroupName(), service.getServiceId(), - serviceName, service.getServiceType(), getId(), getName(), getType(), sg.getStackId(), getDesiredState().toString(), - getServiceComponentStateCount(), isRecoveryEnabled(), displayName, moduleComponent.getVersion(), + service.getName(), service.getServiceType(), getName(), desiredStackId, getDesiredState().toString(), + getServiceComponentStateCount(), isRecoveryEnabled(), displayName, repositoryVersionEntity.getVersion(), getRepositoryState()); return r; @@ -532,7 +514,7 @@ public boolean canBeRemoved() { if (!sch.canBeRemoved()) { LOG.warn("Found non removable hostcomponent when trying to" + " delete service component" + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName() - + ", componentName=" + getName() + ", componentType=" + getType() + ", state=" + sch.getState() + ", hostname=" + + ", componentName=" + getName() + ", state=" + sch.getState() + ", hostname=" + sch.getHostName()); return false; } @@ -547,12 +529,12 @@ public void deleteAllServiceComponentHosts() throws AmbariException { try { LOG.info("Deleting all servicecomponenthosts for component" + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName() + ", componentName=" + getName() - + ", componentType=" + getType() + ", recoveryEnabled=" + isRecoveryEnabled()); + + ", recoveryEnabled=" + isRecoveryEnabled()); for (ServiceComponentHost sch : hostComponents.values()) { if (!sch.canBeRemoved()) { throw new AmbariException("Found non removable hostcomponent " + " when trying to delete" + " all hostcomponents from servicecomponent" + ", clusterName=" + getClusterName() - + ", serviceName=" + getServiceName() + ", componentName=" + getName() + ", componentType=" + getType() + + ", serviceName=" + getServiceName() + ", componentName=" + getName() + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName()); } } @@ -573,14 +555,13 @@ public void deleteServiceComponentHosts(String hostname) throws AmbariException try { ServiceComponentHost sch = getServiceComponentHost(hostname); LOG.info("Deleting servicecomponenthost for cluster" + ", clusterName=" + getClusterName() - + ", serviceName=" + getServiceName() + ", componentName=" + getName() + ", componentType=" + getType() + + ", serviceName=" + getServiceName() + ", componentName=" + getName() + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName()); if (!sch.canBeRemoved()) { throw new AmbariException("Could not delete hostcomponent from cluster" + ", clusterName=" + getClusterName() + ", serviceName=" + getServiceName() + ", componentName=" + getName() - + ", componentType=" + getType() + ", recoveryEnabled=" + isRecoveryEnabled() + ", hostname=" + sch.getHostName()); } @@ -609,9 +590,139 @@ public void delete() throws AmbariException { } } + + /** + * Follows this version logic: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <table border="1">
+ *   <tr><th>DB hostcomponent1</th><th>DB hostcomponentN</th><th>DB desired</th><th>New desired</th><th>Repo State</th></tr>
+ *   <tr><td>v1</td><td>v1</td><td>UNKNOWN</td><td>v1</td><td>CURRENT</td></tr>
+ *   <tr><td>v1</td><td>v2</td><td>UNKNOWN</td><td>UNKNOWN</td><td>OUT_OF_SYNC</td></tr>
+ *   <tr><td>v1</td><td>v2</td><td>v2</td><td>v2 (no change)</td><td>OUT_OF_SYNC</td></tr>
+ *   <tr><td>v2</td><td>v2</td><td>v1</td><td>v1 (no change)</td><td>OUT_OF_SYNC</td></tr>
+ *   <tr><td>v2</td><td>v2</td><td>v2</td><td>v2 (no change)</td><td>CURRENT</td></tr>
+ * </table>
+ */ + @Override + @Transactional + public void updateRepositoryState(String reportedVersion) throws AmbariException { + + ServiceComponentDesiredStateEntity component = serviceComponentDesiredStateDAO.findById( + desiredStateEntityId); + + List componentVersions = serviceComponentDesiredStateDAO.findVersions( + getClusterId(), getServiceGroupId(), getServiceId(), getName()); + + // per component, this list should be small, so iterating here isn't a big deal + Map map = new HashMap<>(Maps.uniqueIndex(componentVersions, + new Function() { + @Override + public String apply(ServiceComponentVersionEntity input) { + return input.getRepositoryVersion().getVersion(); + } + })); + + if (LOG.isDebugEnabled()) { + LOG.debug("Existing versions for {}/{}/{}: {}", + getClusterName(), getServiceName(), getName(), map.keySet()); + } + + ServiceComponentVersionEntity componentVersion = map.get(reportedVersion); + + if (null == componentVersion) { + RepositoryVersionEntity repoVersion = repoVersionDAO.findByStackAndVersion( + getDesiredStackId(), reportedVersion); + + if (null != repoVersion) { + componentVersion = new ServiceComponentVersionEntity(); + componentVersion.setRepositoryVersion(repoVersion); + componentVersion.setState(RepositoryVersionState.INSTALLED); + componentVersion.setUserName("auto-reported"); + + // since we've never seen this version before, mark the component as CURRENT + component.setRepositoryState(RepositoryVersionState.CURRENT); + component.addVersion(componentVersion); + + component = serviceComponentDesiredStateDAO.merge(component); + + map.put(reportedVersion, componentVersion); + + } else { + LOG.warn("There is no repository available for stack {}, version {}", + getDesiredStackId(), reportedVersion); + } + } + + if (MapUtils.isNotEmpty(map)) { + String desiredVersion = component.getDesiredVersion(); + RepositoryVersionEntity desiredRepositoryVersion = service.getDesiredRepositoryVersion(); + + // TODO: is this function call really required? Check. + List hostComponents = hostComponentDAO.findByServiceAndComponentAndNotVersion( + getClusterId(), getServiceGroupId(), getServiceId(), component.getComponentName(), reportedVersion); + + LOG.debug("{}/{} reportedVersion={}, desiredVersion={}, non-matching desired count={}, repo_state={}", + component.getServiceId(), component.getComponentName(), reportedVersion, + desiredVersion, hostComponents.size(), component.getRepositoryState()); + + // !!! if we are unknown, that means it's never been set. Try to determine it.
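The bookkeeping above leans on Guava's Maps.uniqueIndex to key the component's known version entities by version string, wrapped in a mutable HashMap so a newly reported version can be registered afterwards. A self-contained illustration of that idiom (Version is a stand-in type, not an Ambari class):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.google.common.base.Function;
import com.google.common.collect.Maps;

public class UniqueIndexDemo {

  // Stand-in for ServiceComponentVersionEntity; only the key field matters here.
  static final class Version {
    final String number;
    Version(String number) { this.number = number; }
  }

  public static void main(String[] args) {
    List<Version> reported = List.of(new Version("2.6.0"), new Version("2.6.1"));

    // uniqueIndex returns an ImmutableMap keyed by the function's result and
    // throws IllegalArgumentException if two elements produce the same key.
    Map<String, Version> byNumber = new HashMap<>(Maps.uniqueIndex(reported,
        new Function<Version, String>() {
          @Override
          public String apply(Version input) {
            return input.number;
          }
        }));

    // The mutable copy is what lets the caller add a brand-new version later.
    byNumber.put("2.6.2", new Version("2.6.2"));
    System.out.println(byNumber.keySet());
  }
}
```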
+ if (StackVersionListener.UNKNOWN_VERSION.equals(desiredVersion)) { + if (CollectionUtils.isEmpty(hostComponents)) { + // all host components are the same version as reported + component.setDesiredRepositoryVersion(desiredRepositoryVersion); + component.setRepositoryState(RepositoryVersionState.CURRENT); + } else { + // desired is UNKNOWN and there's a mix of versions in the host components + component.setRepositoryState(RepositoryVersionState.OUT_OF_SYNC); + } + } else { + if (!reportedVersion.equals(desiredVersion)) { + component.setRepositoryState(RepositoryVersionState.OUT_OF_SYNC); + } else if (CollectionUtils.isEmpty(hostComponents)) { + component.setRepositoryState(RepositoryVersionState.CURRENT); + } + } + + component = serviceComponentDesiredStateDAO.merge(component); + } + } + @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public RepositoryVersionState getRepositoryState() { ServiceComponentDesiredStateEntity component = serviceComponentDesiredStateDAO.findById( desiredStateEntityId); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroup.java index 9ea95a130a9..dce2337c68d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroup.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroup.java @@ -72,11 +72,4 @@ public interface ServiceGroup { * @return updated service group entity */ ServiceGroupEntity deleteServiceGroupDependency(Long dependencyServiceGroupId) throws AmbariException; - - /** - * Gets the management pack associated with this service group. - * - * @return the management pack. - */ - Long getMpackId(); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroupImpl.java index 504644134bc..5ada744eaed 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroupImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceGroupImpl.java @@ -95,7 +95,7 @@ public ServiceGroupImpl(@Assisted Cluster cluster, this.serviceGroupDependencies = serviceGroupDependencies; } - serviceGroupEntityPK = getServiceGroupEntityPK(serviceGroupEntity); + this.serviceGroupEntityPK = getServiceGroupEntityPK(serviceGroupEntity); persist(serviceGroupEntity); } @@ -115,13 +115,13 @@ public ServiceGroupImpl(@Assisted Cluster cluster, this.serviceGroupDAO = serviceGroupDAO; this.eventPublisher = eventPublisher; - serviceGroupId = serviceGroupEntity.getServiceGroupId(); - serviceGroupName = serviceGroupEntity.getServiceGroupName(); + this.serviceGroupId = serviceGroupEntity.getServiceGroupId(); + this.serviceGroupName = serviceGroupEntity.getServiceGroupName(); StackEntity stack = serviceGroupEntity.getStack(); - stackId = new StackId(stack.getStackName(), stack.getStackVersion()); - serviceGroupDependencies = getServiceGroupDependencies(serviceGroupEntity.getServiceGroupDependencies()); + this.stackId = new StackId(stack.getStackName(), stack.getStackVersion()); + this.serviceGroupDependencies = getServiceGroupDependencies(serviceGroupEntity.getServiceGroupDependencies()); - serviceGroupEntityPK = getServiceGroupEntityPK(serviceGroupEntity); + this.serviceGroupEntityPK = getServiceGroupEntityPK(serviceGroupEntity); } @Override @@ -162,15 +162,6 @@ public void setServiceGroupDependencies(Set serviceGroupDepende this.serviceGroupDependencies 
= serviceGroupDependencies; } - /** - * {@inheritDoc} - */ - @Override - public Long getMpackId() { - ServiceGroupEntity serviceGroupEntity = getServiceGroupEntity(); - return serviceGroupEntity.getStack().getMpackId(); - } - @Override public ServiceGroupResponse convertToResponse() { ServiceGroupResponse r = new ServiceGroupResponse(cluster.getClusterId(), diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java index 72939b123ee..7f1daa7ebe5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java @@ -30,8 +30,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ObjectNotFoundException; import org.apache.ambari.server.ServiceComponentNotFoundException; @@ -80,8 +78,7 @@ public class ServiceImpl implements Service { private final Cluster cluster; private final ServiceGroup serviceGroup; - private final ConcurrentMap componentsByName = new ConcurrentHashMap<>(); - private final ConcurrentMap componentsById = new ConcurrentHashMap<>(); + private final ConcurrentMap components = new ConcurrentHashMap<>(); private List serviceDependencies = new ArrayList<>(); private boolean isClientOnlyService; private boolean isCredentialStoreSupported; @@ -213,11 +210,11 @@ public class ServiceImpl implements Service { this.serviceDesiredStateDAO = serviceDesiredStateDAO; this.serviceComponentFactory = serviceComponentFactory; this.eventPublisher = eventPublisher; - serviceId = serviceEntity.getServiceId(); - serviceName = serviceEntity.getServiceName(); - serviceType = serviceEntity.getServiceType(); + this.serviceId = serviceEntity.getServiceId(); + this.serviceName = serviceEntity.getServiceName(); + this.serviceType = serviceEntity.getServiceType(); this.ambariMetaInfo = ambariMetaInfo; - serviceDependencies = getServiceDependencies(serviceEntity.getServiceDependencies()); + this.serviceDependencies = getServiceDependencies(serviceEntity.getServiceDependencies()); ServiceDesiredStateEntity serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity(); serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity); @@ -227,12 +224,10 @@ public class ServiceImpl implements Service { for (ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity : serviceEntity.getServiceComponentDesiredStateEntities()) { try { - ServiceComponent svcComponent = serviceComponentFactory.createExisting(this, - serviceComponentDesiredStateEntity); - componentsByName.put(serviceComponentDesiredStateEntity.getComponentName(), svcComponent); - componentsById.put(serviceComponentDesiredStateEntity.getId(), svcComponent); - - } catch(ProvisionException ex) { + components.put(serviceComponentDesiredStateEntity.getComponentName(), + serviceComponentFactory.createExisting(this, + serviceComponentDesiredStateEntity)); + } catch(ProvisionException ex) { StackId stackId = new StackId(serviceComponentDesiredStateEntity.getDesiredStack()); LOG.error(String.format("Can not get component info: stackName=%s, stackVersion=%s, serviceName=%s, componentName=%s", stackId.getStackName(), stackId.getStackVersion(), @@ -307,13 +302,13 @@ public String 
getServiceGroupName() { @Override public Map getServiceComponents() { - return new HashMap<>(componentsByName); + return new HashMap<>(components); } @Override public void addServiceComponents( - Map componentsByName) throws AmbariException { - for (ServiceComponent sc : componentsByName.values()) { + Map components) throws AmbariException { + for (ServiceComponent sc : components.values()) { addServiceComponent(sc); } } @@ -329,7 +324,7 @@ public void setServiceDependencies(List serviceDependencies) { @Override public void addServiceComponent(ServiceComponent component) throws AmbariException { - if (componentsByName.containsKey(component.getName())) { + if (components.containsKey(component.getName())) { throw new AmbariException("Cannot add duplicate ServiceComponent" + ", clusterName=" + cluster.getClusterName() + ", clusterId=" + cluster.getClusterId() @@ -338,13 +333,13 @@ public void addServiceComponent(ServiceComponent component) throws AmbariExcepti + ", serviceComponentName=" + component.getName()); } - componentsByName.put(component.getName(), component); + components.put(component.getName(), component); } @Override - public ServiceComponent addServiceComponent(String serviceComponentName, String serviceComponentType) + public ServiceComponent addServiceComponent(String serviceComponentName) throws AmbariException { - ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName, serviceComponentType); + ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName); addServiceComponent(component); return component; } @@ -352,7 +347,7 @@ public ServiceComponent addServiceComponent(String serviceComponentName, String @Override public ServiceComponent getServiceComponent(String componentName) throws AmbariException { - ServiceComponent serviceComponent = componentsByName.get(componentName); + ServiceComponent serviceComponent = components.get(componentName); if (null == serviceComponent) { throw new ServiceComponentNotFoundException(cluster.getClusterName(), getName(), getServiceType(), serviceGroup.getServiceGroupName(), componentName); @@ -452,8 +447,6 @@ public StackId getDesiredStackId() { * {@inheritDoc} */ @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public RepositoryVersionEntity getDesiredRepositoryVersion() { ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity(); return serviceDesiredStateEntity.getDesiredRepositoryVersion(); @@ -464,15 +457,13 @@ public RepositoryVersionEntity getDesiredRepositoryVersion() { */ @Override @Transactional - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity) { ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity(); serviceDesiredStateEntity.setDesiredRepositoryVersion(repositoryVersionEntity); serviceDesiredStateDAO.merge(serviceDesiredStateEntity); - Collection componentsByName = getServiceComponents().values(); - for (ServiceComponent component : componentsByName) { + Collection components = getServiceComponents().values(); + for (ServiceComponent component : components) { component.setDesiredRepositoryVersion(repositoryVersionEntity); } } @@ -481,15 +472,13 @@ public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersio * {@inheritDoc} */ @Override - @Deprecated - @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public 
RepositoryVersionState getRepositoryState() { - if (componentsByName.isEmpty()) { + if (components.isEmpty()) { return RepositoryVersionState.NOT_REQUIRED; } List states = new ArrayList<>(); - for( ServiceComponent component : componentsByName.values() ){ + for( ServiceComponent component : components.values() ){ states.add(component.getRepositoryState()); } @@ -499,12 +488,11 @@ public RepositoryVersionState getRepositoryState() { @Override public ServiceResponse convertToResponse() { RepositoryVersionEntity desiredRespositoryVersion = getDesiredRepositoryVersion(); - Mpack mpack = ambariMetaInfo.getMpack(serviceGroup.getMpackId()); - Module module = mpack.getModule(getName()); + StackId desiredStackId = desiredRespositoryVersion.getStackId(); ServiceResponse r = new ServiceResponse(cluster.getClusterId(), cluster.getClusterName(), serviceGroup.getServiceGroupId(), serviceGroup.getServiceGroupName(), - getServiceId(), getName(), getServiceType(), serviceGroup.getStackId(), module.getVersion(), + getServiceId(), getName(), getServiceType(), desiredStackId, desiredRespositoryVersion.getVersion(), getRepositoryState(), getDesiredState().toString(), isCredentialStoreSupported(), isCredentialStoreEnabled()); r.setDesiredRepositoryVersionId(desiredRespositoryVersion.getId()); @@ -661,9 +649,9 @@ public void debugDump(StringBuilder sb) { .append(", clusterId=").append(cluster.getClusterId()) .append(", desiredStackVersion=").append(getDesiredStackId()) .append(", desiredState=").append(getDesiredState()) - .append(", componentsByName=[ "); + .append(", components=[ "); boolean first = true; - for (ServiceComponent sc : componentsByName.values()) { + for (ServiceComponent sc : components.values()) { if (!first) { sb.append(" , "); } @@ -714,11 +702,11 @@ void persistEntities(ClusterServiceEntity serviceEntity) { @Override public boolean canBeRemoved() { // - // A service can be deleted if all it's componentsByName + // A service can be deleted if all its components // can be removed, irrespective of the state of // the service itself.
// - for (ServiceComponent sc : componentsByName.values()) { + for (ServiceComponent sc : components.values()) { if (!sc.canBeRemoved()) { LOG.warn("Found non-removable component when trying to delete service" + ", clusterName=" + cluster.getClusterName() + ", serviceName=" + getName() + ", serviceType=" @@ -760,22 +748,22 @@ void deleteAllServiceConfigs() throws AmbariException { public void deleteAllComponents() throws AmbariException { lock.lock(); try { - LOG.info("Deleting all componentsByName for service" + ", clusterName=" + cluster.getClusterName() + LOG.info("Deleting all components for service" + ", clusterName=" + cluster.getClusterName() + ", serviceName=" + getName()); // FIXME check dependencies from meta layer - for (ServiceComponent component : componentsByName.values()) { + for (ServiceComponent component : components.values()) { if (!component.canBeRemoved()) { throw new AmbariException("Found non removable component when trying to" - + " delete all componentsByName from service" + ", clusterName=" + cluster.getClusterName() + + " delete all components from service" + ", clusterName=" + cluster.getClusterName() + ", serviceName=" + getName() + ", componentName=" + component.getName()); } } - for (ServiceComponent serviceComponent : componentsByName.values()) { + for (ServiceComponent serviceComponent : components.values()) { serviceComponent.delete(); } - componentsByName.clear(); + components.clear(); } finally { lock.unlock(); } @@ -798,7 +786,7 @@ public void deleteServiceComponent(String componentName) } component.delete(); - componentsByName.remove(componentName); + components.remove(componentName); } finally { lock.unlock(); } @@ -812,7 +800,7 @@ public boolean isClientOnlyService() { @Override @Transactional public void delete() throws AmbariException { - List componentsByName = getComponents(); // XXX temporal coupling, need to call this BEFORE deletingAllComponents + List components = getComponents(); // XXX temporal coupling, need to call this BEFORE deletingAllComponents deleteAllComponents(); deleteAllServiceConfigs(); @@ -827,7 +815,7 @@ public void delete() throws AmbariException { ServiceRemovedEvent event = new ServiceRemovedEvent(getClusterId(), stackId.getStackName(), stackId.getStackVersion(), getName(), getServiceType(), - serviceGroup.getServiceGroupName(), componentsByName); + serviceGroup.getServiceGroupName(), components); eventPublisher.publish(event); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java index db6a2a29876..ea1a439c251 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java @@ -74,7 +74,7 @@ import org.apache.ambari.server.state.stack.upgrade.HostOrderGrouping; import org.apache.ambari.server.state.stack.upgrade.HostOrderItem; import org.apache.ambari.server.state.stack.upgrade.HostOrderItem.HostOrderActionType; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; +import org.apache.ambari.server.state.stack.upgrade.Lifecycle; import org.apache.ambari.server.state.stack.upgrade.UpgradeScope; import org.apache.ambari.server.state.stack.upgrade.UpgradeType; import org.apache.commons.collections.CollectionUtils; @@ -1297,7 +1297,7 @@ void check(Cluster cluster, Direction direction, UpgradeType type, UpgradePack u // verify that the upgradepack has the required grouping and set the 
// action items on it HostOrderGrouping hostOrderGrouping = null; - List groupings = upgradePack.getGroups(LifecycleType.UPGRADE, direction); + List groupings = upgradePack.getGroups(Lifecycle.LifecycleType.UPGRADE, direction); for (Grouping grouping : groupings) { if (grouping instanceof HostOrderGrouping) { hostOrderGrouping = (HostOrderGrouping) grouping; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java index 8b83c01c824..94d5ba2fbca 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java @@ -61,7 +61,7 @@ import org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent; import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.stack.upgrade.Grouping; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; +import org.apache.ambari.server.state.stack.upgrade.Lifecycle; import org.apache.ambari.server.state.stack.upgrade.ManualTask; import org.apache.ambari.server.state.stack.upgrade.RestartTask; import org.apache.ambari.server.state.stack.upgrade.ServiceCheckGrouping; @@ -302,7 +302,7 @@ public List createSequence(UpgradePack upgradePack, List groups = new ArrayList<>(); UpgradeGroupHolder previousGroupHolder = null; - for (Grouping group : upgradePack.getGroups(LifecycleType.UPGRADE, context.getDirection())) { + for (Grouping group : upgradePack.getGroups(Lifecycle.LifecycleType.UPGRADE, context.getDirection())) { // !!! grouping is not scoped to context if (!context.isScoped(group.scope)) { @@ -318,7 +318,6 @@ public List createSequence(UpgradePack upgradePack, UpgradeGroupHolder groupHolder = new UpgradeGroupHolder(); groupHolder.name = group.name; - groupHolder.lifecycle = group.lifecycle; groupHolder.title = group.title; groupHolder.groupClass = group.getClass(); groupHolder.skippable = group.skippable; @@ -701,22 +700,15 @@ private String tokenReplace(UpgradeContext ctx, String source, String service, S * Short-lived objects that hold information about upgrade groups */ public static class UpgradeGroupHolder { - - /** - * The lifecycle - */ - public LifecycleType lifecycle; - /** * */ private boolean processingGroup; /** - * The title + * The name */ public String name; - /** * The title */ diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java index 23c7489c854..9b0390faba5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java @@ -1187,58 +1187,6 @@ public Service getServiceByComponentName(String componentName) throws AmbariExce throw new ServiceNotFoundException(getClusterName(), "component: " + componentName); } - @Override - public Service getServiceByComponentId(Long componentId) throws AmbariException { - for (Service service : services.values()) { - for (ServiceComponent component : service.getServiceComponents().values()) { - if (component.getId().equals(componentId)) { - return service; - } - } - } - - throw new ServiceNotFoundException(getClusterName(), "component Id: " + componentId); - } - - @Override - public String getComponentName(Long componentId) throws AmbariException { - for (Service service : 
services.values()) { - for (ServiceComponent component : service.getServiceComponents().values()) { - if (component.getId().equals(componentId)) { - return component.getName(); - } - } - } - - throw new ServiceNotFoundException(getClusterName(), "component Id: " + componentId); - } - - - @Override - public String getComponentType(Long componentId) throws AmbariException { - for (Service service : services.values()) { - for (ServiceComponent component : service.getServiceComponents().values()) { - if (component.getId().equals(componentId)) { - return component.getType(); - } - } - } - - throw new ServiceNotFoundException(getClusterName(), "component Id: " + componentId); - } - - @Override - public Long getComponentId(String componentName) throws AmbariException { - for (Service service : services.values()) { - for (ServiceComponent component : service.getServiceComponents().values()) { - if (component.getName().equals(componentName)) { - return component.getId(); - } - } - } - - throw new ServiceNotFoundException(getClusterName(), "component Name: " + componentName); - } @Override public ServiceGroup getServiceGroup(String serviceGroupName) throws ServiceGroupNotFoundException { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java index b1cf3813b53..2536d8b3a0a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java @@ -101,8 +101,7 @@ public class ConfigGroupImpl implements ConfigGroup { @AssistedInject public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("serviceGroupId") @Nullable Long serviceGroupId, - @Assisted("serviceId") @Nullable Long serviceId, - @Assisted("name") String name, + @Assisted("serviceId") @Nullable Long serviceId, @Assisted("name") String name, @Assisted("tag") String tag, @Assisted("description") String description, @Assisted("configs") Map configurations, @Assisted("hosts") Map hosts, Clusters clusters, ConfigFactory configFactory, diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java index bec6d5d9e32..6cad9be253e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/VersionDefinitionXml.java @@ -48,8 +48,6 @@ import javax.xml.validation.Schema; import javax.xml.validation.SchemaFactory; -import org.apache.ambari.annotations.Experimental; -import org.apache.ambari.annotations.ExperimentalFeature; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.ComponentInfo; @@ -80,8 +78,6 @@ */ @XmlRootElement(name="repository-version") @XmlAccessorType(XmlAccessType.FIELD) -@Deprecated -@Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL) public class VersionDefinitionXml { public static String SCHEMA_LOCATION = "version_definition.xsd"; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/RepositoryXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/RepositoryXml.java index f77fddd3f36..ccb25e8595e 100644 --- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/RepositoryXml.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/RepositoryXml.java @@ -33,7 +33,6 @@ import org.apache.ambari.server.stack.Validable; import org.apache.ambari.server.state.RepositoryInfo; -import org.codehaus.jackson.annotate.JsonProperty; /** * Represents the repository file $STACK_VERSION/repos/repoinfo.xml. @@ -44,7 +43,6 @@ public class RepositoryXml implements Validable{ @XmlElement(name="latest") private String latestUri; - @XmlElement(name="os") private List oses = new ArrayList<>(); @@ -84,7 +82,7 @@ public Collection getErrors() { @Override public void addErrors(Collection errors) { - errorSet.addAll(errors); + this.errorSet.addAll(errors); } /** @@ -107,7 +105,6 @@ public List getOses() { @XmlAccessorType(XmlAccessType.FIELD) public static class Os { @XmlAttribute(name="family") - @JsonProperty("os_type") private String family; @XmlElement(name="package-version") @@ -146,30 +143,16 @@ public String getPackageVersion() { */ @XmlAccessorType(XmlAccessType.FIELD) public static class Repo { - @JsonProperty("base_url") private String baseurl = null; - - @JsonProperty("mirrors_list") private String mirrorslist = null; - - @JsonProperty("repo_id") private String repoid = null; - - @JsonProperty("repo_name") private String reponame = null; - - @JsonProperty("distribution") private String distribution = null; - - @JsonProperty("components") private String components = null; - - @JsonProperty("unique") private boolean unique = false; @XmlElementWrapper(name="tags") @XmlElement(name="tag") - @JsonProperty("tags") private Set tags = new HashSet<>(); private Repo() { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java index 3841497e8e4..a287cb567e9 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java @@ -19,13 +19,13 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.regex.Pattern; -import java.util.stream.Collectors; import javax.xml.bind.Unmarshaller; import javax.xml.bind.annotation.XmlAccessType; @@ -39,13 +39,13 @@ import org.apache.ambari.annotations.Experimental; import org.apache.ambari.annotations.ExperimentalFeature; +import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping; import org.apache.ambari.server.state.stack.upgrade.ConfigureTask; import org.apache.ambari.server.state.stack.upgrade.CreateAndConfigureTask; import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.stack.upgrade.Grouping; import org.apache.ambari.server.state.stack.upgrade.Lifecycle; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; import org.apache.ambari.server.state.stack.upgrade.ServiceCheckGrouping; import org.apache.ambari.server.state.stack.upgrade.Task; import org.apache.ambari.server.state.stack.upgrade.UpgradeType; @@ -76,9 +76,8 @@ public class UpgradePack { @XmlElement(name="lifecycle") public List lifecycles; - @XmlElementWrapper(name="prerequisite-checks") - @XmlElement(name="check") - private List prerequisiteChecks; + 
@XmlElement(name="prerequisite-checks") + private PrerequisiteChecks prerequisiteChecks; /** * In the case of a rolling upgrade, will specify processing logic for a particular component. @@ -125,6 +124,10 @@ public class UpgradePack { @XmlElement(name="type", defaultValue="rolling") private UpgradeType type; + @XmlElementWrapper(name="upgrade-path") + @XmlElement(name="intermediate-stack") + private List intermediateStacks; + public String getName() { return name; } @@ -153,18 +156,76 @@ public List getPrerequisiteChecks() { if (prerequisiteChecks == null) { return new ArrayList<>(); } + return new ArrayList<>(prerequisiteChecks.checks); + } - return prerequisiteChecks.stream().map(c -> c.className).collect(Collectors.toList()); + /** + * + * @return the prerequisite check configuration + */ + public PrerequisiteCheckConfig getPrerequisiteCheckConfig() { + if (prerequisiteChecks == null) { + return new PrerequisiteCheckConfig(); + } + return prerequisiteChecks.configuration; } /** - * Merges the processing section of the upgrade xml with - * the processing section from a service's upgrade xml. - * These are added to the end of the current list of services. + * Merges the prerequisite checks section of the upgrade xml with + * the prerequisite checks from a service's upgrade xml. + * These are added to the end of the current list of checks. * * @param pack * the service's upgrade pack */ + public void mergePrerequisiteChecks(UpgradePack pack) { + PrerequisiteChecks newPrereqChecks = pack.prerequisiteChecks; + if (prerequisiteChecks == null) { + prerequisiteChecks = newPrereqChecks; + return; + } + + if (newPrereqChecks == null) { + return; + } + + if (prerequisiteChecks.checks == null) { + prerequisiteChecks.checks = new ArrayList<>(); + } + if (newPrereqChecks.checks != null) { + prerequisiteChecks.checks.addAll(newPrereqChecks.checks); + } + + if (newPrereqChecks.configuration == null) { + return; + } + + if (prerequisiteChecks.configuration == null) { + prerequisiteChecks.configuration = newPrereqChecks.configuration; + return; + } + if (prerequisiteChecks.configuration.globalProperties == null) { + prerequisiteChecks.configuration.globalProperties = new ArrayList<>(); + } + if (prerequisiteChecks.configuration.prerequisiteCheckProperties == null) { + prerequisiteChecks.configuration.prerequisiteCheckProperties = new ArrayList<>(); + } + if (newPrereqChecks.configuration.globalProperties != null) { + prerequisiteChecks.configuration.globalProperties.addAll(newPrereqChecks.configuration.globalProperties); + } + if (newPrereqChecks.configuration.prerequisiteCheckProperties != null) { + prerequisiteChecks.configuration.prerequisiteCheckProperties.addAll(newPrereqChecks.configuration.prerequisiteCheckProperties); + } + } + +/** + * Merges the processing section of the upgrade xml with + * the processing section from a service's upgrade xml. + * These are added to the end of the current list of services. + * + * @param pack + * the service's upgrade pack + */ public void mergeProcessing(UpgradePack pack) { List list = pack.processing; if (list == null) { @@ -180,6 +241,22 @@ public void mergeProcessing(UpgradePack pack) { initializeProcessingComponentMappings(); } + /** + * Gets a list of stacks which are between the current stack version and the + * target stack version inclusive. For example, if upgrading from HDP-2.2 to + * HDP-2.4, this should include HDP-2.3 and HDP-2.4. + *
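As an illustrative sketch (not part of the patch), assuming the AmbariMetaInfo#getConfigUpgradePack(String, String) lookup referenced below, a caller could walk the restored upgrade path like this:

    // Walk the upgrade path of an HDP-2.2 -> HDP-2.4 pack; each entry carries
    // only a version attribute, e.g. "2.3", then "2.4" (target inclusive).
    List<UpgradePack.IntermediateStack> path = upgradePack.getIntermediateStacks();
    if (null != path) {
      for (UpgradePack.IntermediateStack intermediate : path) {
        ConfigUpgradePack configPack =
            ambariMetaInfo.getConfigUpgradePack("HDP", intermediate.version);
        // ... merge configPack into the combined configuration view ...
      }
    }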

+ * This method is used to combine the correct configuration packs for a + * specific upgrade from + * {@link AmbariMetaInfo#getConfigUpgradePack(String, String)}. + * + * @return a list of intermediate stacks (target stack inclusive) or + * {@code null} if none. + */ + public List getIntermediateStacks() { + return intermediateStacks; + } + /** * @return the target stack, or {@code null} if the upgrade is within the same stack */ @@ -226,7 +303,7 @@ public List getAllGroups() { * the direction to return the ordered groups * @return the list of groups */ - public List getGroups(LifecycleType type, Direction direction) { + public List getGroups(Lifecycle.LifecycleType type, Direction direction) { // !!! lifecycles are bound to be only one per-type per-Upgrade Pack, so findFirst() is ok here Optional optional = lifecycles.stream().filter(l -> l.type == type).findFirst(); @@ -553,51 +630,113 @@ private void initializeTasks(String service, List tasks) { } /** - * @return + * An intermediate stack definition in + * upgrade/upgrade-path/intermediate-stack path */ - public PrerequisiteCheckConfig getPrerequisiteCheckConfig() { - return new PrerequisiteCheckConfig(prerequisiteChecks); + public static class IntermediateStack { + + @XmlAttribute + public String version; } /** - * Holds the config properties for all the checks defined for an Upgrade Pack. + * Container class to specify list of additional prerequisite checks to run in addition to the + * required prerequisite checks and configuration properties for all prerequisite checks + */ + public static class PrerequisiteChecks { + /** + * List of additional prerequisite checks to run in addition to required prerequisite checks + */ + @XmlElement(name="check", type=String.class) + public List checks = new ArrayList<>(); + + /** + * Prerequisite checks configuration + */ + @XmlElement(name="configuration") + public PrerequisiteCheckConfig configuration; + } + + /** + * Prerequisite checks configuration */ public static class PrerequisiteCheckConfig { - private List m_checks; - private PrerequisiteCheckConfig(List checks) { - m_checks = checks; + /** + * Global config properties common to all prereq checks + */ + @XmlElement(name="property") + public List globalProperties; + + /** + * Config properties for individual prerequisite checks + */ + @XmlElement(name="check-properties") + public List prerequisiteCheckProperties; + + /** + * Get global config properties as a map + * @return Map of global config properties + */ + public Map getGlobalProperties() { + if(globalProperties == null) { + return null; + } + Map result = new HashMap<>(); + for (PrerequisiteProperty property : globalProperties) { + result.put(property.name, property.value); + } + return result; } + /** + * Get config properties for a given prerequisite check as a map + * @param checkName The prerequisite check name + * @return Map of config properties for the prerequisite check + */ public Map getCheckProperties(String checkName) { - if (null == m_checks) { - return Collections.emptyMap(); + if(prerequisiteCheckProperties == null) { + return null; } - - Optional optional = m_checks.stream() - .filter(c -> c.className.equals(checkName)).findFirst(); - - if (!optional.isPresent()) { - return Collections.emptyMap(); + for(PrerequisiteCheckProperties checkProperties : prerequisiteCheckProperties) { + if(checkProperties.name.equalsIgnoreCase(checkName)) { + return checkProperties.getProperties(); + } } - - PrerequisiteCheckDefinition checks = optional.get(); - - Map map = 
checks.properties.stream().collect(Collectors.toMap( - c -> c.name, c -> c.value)); - - return map; + return null; } } /** - * The definition for each check in the Upgrade Pack. + * Config properties for a specific prerequisite check. */ - public static class PrerequisiteCheckDefinition { - @XmlAttribute(name="class") - public String className; + public static class PrerequisiteCheckProperties { + /** + * Prereq check name + */ + @XmlAttribute + public String name; + /** + * Config properties for the prerequisite check + */ @XmlElement(name="property") - public List properties = new ArrayList<>(); + public List properties; + + /** + * Get config properties as a map + * @return Map of config properties + */ + public Map getProperties() { + if(properties == null) { + return null; + } + + Map result = new HashMap<>(); + for (PrerequisiteProperty property : properties) { + result.put(property.name, property.value); + } + return result; + } } /** @@ -623,5 +762,4 @@ private static class DowngradeTasks { private List tasks = new ArrayList<>(); } - } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java index 6d7b6d6d13a..32487177ab1 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java @@ -29,7 +29,6 @@ import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlSeeAlso; -import javax.xml.bind.annotation.XmlTransient; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.stack.HostsType; @@ -86,9 +85,6 @@ public class Grouping { @XmlElement(name="scope") public UpgradeScope scope = UpgradeScope.ANY; - @XmlTransient - public LifecycleType lifecycle; - /** * A condition element with can prevent this entire group from being scheduled * in the upgrade. @@ -415,10 +411,6 @@ private void addSkippedServices(Map> skippedServices, */ @Override public String toString() { - return Objects.toStringHelper(this) - .add("name", name) - .add("title", title) - .toString(); - + return Objects.toStringHelper(this).add("name", name).toString(); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Lifecycle.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Lifecycle.java index 21e8c1bb4d6..3d75892ff0e 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Lifecycle.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Lifecycle.java @@ -17,14 +17,18 @@ */ package org.apache.ambari.server.state.stack.upgrade; +import java.util.Collection; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; -import javax.xml.bind.Unmarshaller; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlEnumValue; import javax.xml.bind.annotation.XmlRootElement; /** @@ -33,14 +37,14 @@ * namely: * *

 * <ul>
- *   <li>{@link LifecycleType#INSTALL}</li>
- *   <li>{@link LifecycleType#QUIET}</li>
- *   <li>{@link LifecycleType#SNAPSHOT}</li>
- *   <li>{@link LifecycleType#PREPARE}</li>
- *   <li>{@link LifecycleType#STOP}</li>
- *   <li>{@link LifecycleType#UPGRADE}</li>
- *   <li>{@link LifecycleType#START}</li>
- *   <li>{@link LifecycleType#FINALIZE}</li>
+ *   <li>INSTALL</li>
+ *   <li>QUIET</li>
+ *   <li>SNAPSHOT</li>
+ *   <li>PREPARE</li>
+ *   <li>STOP</li>
+ *   <li>UPGRADE</li>
+ *   <li>START</li>
+ *   <li>FINALIZE</li>
 * </ul>
*/ @XmlAccessorType(XmlAccessType.FIELD) @@ -54,16 +58,76 @@ public class Lifecycle { @XmlElement(name="group") public List groups; - /** - * Post-processes the groups for their enclosing lifecycle type + * Identifies the lifecycle types */ - protected void afterUnmarshal(Unmarshaller unmarshaller, Object parent) { - if (null == groups) { - return; + @XmlEnum + public enum LifecycleType { + + /** + * Work required that can be classified as installation. Normally installation of + * bits occurs outside the scope of upgrade orchestration. + */ + @XmlEnumValue("install") + INSTALL(1.0f), + + /** + * Work to stop and wait on, for example, queues or topologies. + */ + @XmlEnumValue("quiet") + QUIET(2.0f), + + /** + * Work required to snapshot or other backup. + */ + @XmlEnumValue("snapshot") + SNAPSHOT(3.0f), + + /** + * Work required to prepare to upgrade. + */ + @XmlEnumValue("prepare") + PREPARE(4.0f), + + /** + * Work required to stop a service. + */ + @XmlEnumValue("stop") + STOP(5.0f), + + /** + * For a Rolling upgrade, work required to restart and upgrade the service. + */ + @XmlEnumValue("upgrade") + UPGRADE(6.0f), + + /** + * Work required to start a service. + */ + @XmlEnumValue("start") + START(7.0f), + + /** + * Work required to finalize. Will not happen until the end of the upgrade. + */ + @XmlEnumValue("finalize") + FINALIZE(8.0f); + + private float m_order; + + private LifecycleType(float order) { + m_order = order; } - groups.stream().forEach(group -> group.lifecycle = type); + + /** + * Returns the ordered collection of lifecycle types. This is prefered over {@link #values()} + * to preserve ordering when adding new values. + */ + public static Collection ordered() { + return Stream.of(LifecycleType.values()).sorted((l1, l2) -> + Float.compare(l1.m_order, l2.m_order)).collect(Collectors.toList()); + } } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/LifecycleType.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/LifecycleType.java deleted file mode 100644 index 84bd0a1cf0d..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/LifecycleType.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.state.stack.upgrade; - -import java.util.Collection; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import javax.xml.bind.annotation.XmlEnum; -import javax.xml.bind.annotation.XmlEnumValue; - - -/** - * Identifies the lifecycle types - */ -@XmlEnum -public enum LifecycleType { - - /** - * Work required that can be classified as installation. Normally installation of - * bits occurs outside the scope of upgrade orchestration. 
- */ - @XmlEnumValue("install") - INSTALL(1.0f), - - /** - * Work to stop and wait on, for example, queues or topologies. - */ - @XmlEnumValue("quiet") - QUIET(2.0f), - - /** - * Work required to snapshot or other backup. - */ - @XmlEnumValue("snapshot") - SNAPSHOT(3.0f), - - /** - * Work required to prepare to upgrade. - */ - @XmlEnumValue("prepare") - PREPARE(4.0f), - - /** - * Work required to stop a service. - */ - @XmlEnumValue("stop") - STOP(5.0f), - - /** - * For a Rolling upgrade, work required to restart and upgrade the service. - */ - @XmlEnumValue("upgrade") - UPGRADE(6.0f), - - /** - * Work required to start a service. - */ - @XmlEnumValue("start") - START(7.0f), - - /** - * Work required to finalize. Will not happen until the end of the upgrade. - */ - @XmlEnumValue("finalize") - FINALIZE(8.0f); - - private float m_order; - - private LifecycleType(float order) { - m_order = order; - } - - - /** - * Returns the ordered collection of lifecycle types. This is prefered over {@link #values()} - * to preserve ordering when adding new values. - */ - public static Collection ordered() { - return Stream.of(LifecycleType.values()).sorted((l1, l2) -> - Float.compare(l1.m_order, l2.m_order)).collect(Collectors.toList()); - } -} \ No newline at end of file diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java index 06b9f8169d6..fcbf846cbb3 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/RepositoryVersionHelper.java @@ -35,26 +35,26 @@ import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.controller.ActionExecutionContext; import org.apache.ambari.server.controller.AmbariManagementController; -import org.apache.ambari.server.controller.internal.OperatingSystemReadOnlyResourceProvider; +import org.apache.ambari.server.controller.internal.OperatingSystemResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryVersionResourceProvider; import org.apache.ambari.server.controller.spi.SystemException; -import org.apache.ambari.server.orm.dao.MpackDAO; -import org.apache.ambari.server.orm.entities.MpackEntity; import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; import org.apache.ambari.server.orm.entities.RepoOsEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.Mpack; import org.apache.ambari.server.state.OsSpecific; import org.apache.ambari.server.state.RepositoryInfo; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; import org.apache.ambari.server.state.ServiceComponent; -import org.apache.ambari.server.state.ServiceGroup; import org.apache.ambari.server.state.ServiceInfo; import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.StackInfo; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; import org.apache.ambari.server.state.stack.OsFamily; import 
org.apache.ambari.server.state.stack.RepoTag; import org.apache.ambari.server.state.stack.UpgradePack; @@ -96,17 +96,45 @@ public class RepositoryVersionHelper { @Inject Provider clusters; - /** - * Used to retrieve management packs. - */ - @Inject - private Provider ambariMetainfoProvider; /** - * Used for retrieving mpacks by their ID. + * Checks repo URLs against the current version for the cluster and make + * adjustments to the Base URL when the current is different. + * + * @param cluster {@link Cluster} object + * @param component resolve {@link RepositoryVersionEntity} for the component, could be {@code null} + * + * @return {@link RepositoryVersionEntity} retrieved for component if set or cluster if not */ - @Inject - private MpackDAO mpackDAO; + @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES) + private RepositoryVersionEntity getRepositoryVersionEntity(Cluster cluster, ServiceComponent component) throws SystemException { + + RepositoryVersionEntity repositoryEntity = null; + + // !!! try to find the component repo first + if (null != component) { + repositoryEntity = component.getDesiredRepositoryVersion(); + } else { + LOG.info("Service component not passed in, attempt to resolve the repository for cluster {}", + cluster.getClusterName()); + } + + if (null == repositoryEntity && null != component) { + try { + Service service = cluster.getService(component.getServiceName()); + repositoryEntity = service.getDesiredRepositoryVersion(); + } catch (AmbariException e) { + throw new SystemException("Unhandled exception", e); + } + } + + if (null == repositoryEntity) { + LOG.info("Cluster {} has no specific Repository Versions. Using stack-defined values", cluster.getClusterName()); + return null; + } + + return repositoryEntity; + } /** * Parses operating systems json to a list of entities. 
Expects json like: @@ -141,11 +169,11 @@ public List parseOperatingSystems(String repositoriesJson) throws final RepoOsEntity operatingSystemEntity = new RepoOsEntity(); - operatingSystemEntity.setFamily(osObj.get(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID).getAsString()); + operatingSystemEntity.setFamily(osObj.get(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID).getAsString()); - if (osObj.has(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS)) { + if (osObj.has(OperatingSystemResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS)) { operatingSystemEntity.setAmbariManaged(osObj.get( - OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS).getAsBoolean()); + OperatingSystemResourceProvider.OPERATING_SYSTEM_AMBARI_MANAGED_REPOS).getAsBoolean()); } for (JsonElement repositoryElement: osObj.get(RepositoryVersionResourceProvider.SUBRESOURCE_REPOSITORIES_PROPERTY_ID).getAsJsonArray()) { @@ -195,7 +223,17 @@ public List createRepoOsEntities(List repositories RepoOsEntity operatingSystemEntity = new RepoOsEntity(); List repositoriesList = new ArrayList<>(); for (RepositoryInfo repository : operatingSystem.getValue()) { - RepoDefinitionEntity repositoryDefinition = RepoDefinitionEntity.from(repository); + RepoDefinitionEntity repositoryDefinition = new RepoDefinitionEntity(); + repositoryDefinition.setBaseUrl(repository.getBaseUrl()); + repositoryDefinition.setRepoName(repository.getRepoName()); + repositoryDefinition.setRepoID(repository.getRepoId()); + repositoryDefinition.setDistribution(repository.getDistribution()); + repositoryDefinition.setComponents(repository.getComponents()); + repositoryDefinition.setMirrors(repository.getMirrorsList()); + repositoryDefinition.setUnique(repository.isUnique()); + + repositoryDefinition.setTags(repository.getTags()); + repositoriesList.add(repositoryDefinition); operatingSystemEntity.setAmbariManaged(repository.isAmbariManagedRepositories()); } @@ -300,57 +338,17 @@ public Map buildRoleParams(AmbariManagementController amc, Repos /** - * Return repositories available for target os version on host based on the - * mpack and host family. - * - * @param mpackEntity - * the management pack to get the repo for. - * @param host - * target {@link Host} for providing repositories list - * - * @return {@link RepoOsEntity} with available repositories for host - * @throws SystemException - * if no repository available for target {@link Host} - */ - public RepoOsEntity getOSEntityForHost(MpackEntity mpackEntity, Host host) - throws SystemException { - String osFamily = host.getOsFamily(); - RepoOsEntity osEntity = null; - for (RepoOsEntity operatingSystem : mpackEntity.getRepositoryOperatingSystems()) { - if (osFamily.equals(operatingSystem.getFamily())) { - osEntity = operatingSystem; - break; - } - } - - if (null == osEntity) { - throw new SystemException( - String.format("Operating System matching %s could not be found for mpack with ID %s", - osFamily, mpackEntity.getId())); - } - - return osEntity; - } - - /** - * Return repositories available for target os version on host based on the - * host family. - * - * @param operatingSystems - * the list of repository operating systems to use when finding a - * match for the host's OS family. 
- * @param host - * target {@link Host} for providing repositories list + * Return repositories available for target os version on host based on {@code repoVersion} repository definition + * @param host target {@link Host} for providing repositories list + * @param repoVersion {@link RepositoryVersionEntity} version definition with all available repositories * * @return {@link RepoOsEntity} with available repositories for host - * @throws SystemException - * if no repository available for target {@link Host} + * @throws SystemException if no repository available for target {@link Host} */ - public RepoOsEntity getOSEntityForHost(List operatingSystems, Host host) - throws SystemException { + public RepoOsEntity getOSEntityForHost(Host host, RepositoryVersionEntity repoVersion) throws SystemException { String osFamily = host.getOsFamily(); RepoOsEntity osEntity = null; - for (RepoOsEntity operatingSystem : operatingSystems) { + for (RepoOsEntity operatingSystem : repoVersion.getRepoOsEntities()) { if (osFamily.equals(operatingSystem.getFamily())) { osEntity = operatingSystem; break; @@ -358,8 +356,8 @@ public RepoOsEntity getOSEntityForHost(List operatingSystems, Host } if (null == osEntity) { - throw new SystemException( - String.format("Operating System matching %s could not be found", osFamily)); + throw new SystemException(String.format("Operating System matching %s could not be found", + osFamily)); } return osEntity; @@ -369,24 +367,35 @@ public RepoOsEntity getOSEntityForHost(List operatingSystems, Host * Adds a command repository to the action context * @param osEntity the OS family */ - public CommandRepository getCommandRepository(Mpack mpack, RepoOsEntity osEntity) - throws AmbariException { + public CommandRepository getCommandRepository(final RepositoryVersionEntity repoVersion, + final RepoOsEntity osEntity) throws SystemException { final CommandRepository commandRepo = new CommandRepository(); final boolean sysPreppedHost = configuration.get().areHostsSysPrepped().equalsIgnoreCase("true"); + if (null == repoVersion) { + throw new SystemException("Repository version entity is not provided"); + } + commandRepo.setRepositories(osEntity.getFamily(), osEntity.getRepoDefinitionEntities()); - commandRepo.setMpackId(mpack.getResourceId()); - commandRepo.setMpackName(mpack.getName()); - commandRepo.setMpackVersion(mpack.getVersion()); + commandRepo.setRepositoryVersion(repoVersion.getVersion()); + commandRepo.setRepositoryVersionId(repoVersion.getId()); + commandRepo.setResolved(repoVersion.isResolved()); + commandRepo.setStackName(repoVersion.getStackId().getStackName()); commandRepo.getFeature().setPreInstalled(configuration.get().areHostsSysPrepped()); commandRepo.getFeature().setIsScoped(!sysPreppedHost); if (!osEntity.isAmbariManaged()) { commandRepo.setNonManaged(); } else { - commandRepo.setRepoFileName(mpack.getName(), mpack.getResourceId()); - commandRepo.setUniqueSuffix(String.format("-repo-%s", mpack.getMpackId())); + if (repoVersion.isLegacy()){ + commandRepo.setLegacyRepoFileName(repoVersion.getStackName(), repoVersion.getVersion()); + commandRepo.setLegacyRepoId(repoVersion.getVersion()); + commandRepo.getFeature().setIsScoped(false); + } else { + commandRepo.setRepoFileName(repoVersion.getStackName(), repoVersion.getId()); + commandRepo.setUniqueSuffix(String.format("-repo-%s", repoVersion.getId())); + } } if (configuration.get().arePackagesLegacyOverridden()) { @@ -409,48 +418,210 @@ public CommandRepository getCommandRepository(Mpack mpack, RepoOsEntity osEntity */ 
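For orientation, a compressed, illustrative sketch of what the three-argument overload below effectively does, together with the attach step performed by the command visitor further down ("command" is an assumed in-flight execution command):

    // Resolve the repository for a component on a host and attach it to an
    // outgoing command; callers guard against overwriting an existing repo file.
    RepositoryVersionEntity repoVersion = getRepositoryVersionEntity(cluster, component);
    RepoOsEntity osEntity = getOSEntityForHost(host, repoVersion);
    CommandRepository commandRepo = getCommandRepository(repoVersion, osEntity);
    if (null == command.getRepositoryFile()) {
      command.setRepositoryFile(commandRepo);
    }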
@Experimental(feature=ExperimentalFeature.PATCH_UPGRADES) public CommandRepository getCommandRepository(final Cluster cluster, ServiceComponent component, final Host host) - throws AmbariException, SystemException { + throws SystemException { + + RepositoryVersionEntity repoVersion = getRepositoryVersionEntity(cluster, component); + RepoOsEntity osEntity = getOSEntityForHost(host, repoVersion); + + return getCommandRepository(repoVersion, osEntity); + } + + /** + * This method builds and adds repo info to hostLevelParams of the action + * + * @param cluster cluster to which host level params belong + * @param actionContext context of the action. Must not be {@code null} + * @param repositoryVersion repository version entity to use + * @param hostLevelParams hashmap with host level params. Must not be {@code null} + * @param hostName host name for which to add repo info + * @throws AmbariException + */ + @Deprecated + public void addRepoInfoToHostLevelParams(final Cluster cluster, final ActionExecutionContext actionContext, + final RepositoryVersionEntity repositoryVersion, final Map hostLevelParams, + final String hostName) throws AmbariException { + + // if the repo is null, see if any values from the context should go on the + // host params and then return + if (null == repositoryVersion) { + // see if the action context has a repository set to use for the command + if (null != actionContext.getRepositoryVersion()) { + StackId stackId = actionContext.getRepositoryVersion().getStackId(); + hostLevelParams.put(KeyNames.STACK_NAME, stackId.getStackName()); + hostLevelParams.put(KeyNames.STACK_VERSION, stackId.getStackVersion()); + } + + return; + } else { + StackId stackId = repositoryVersion.getStackId(); + hostLevelParams.put(KeyNames.STACK_NAME, stackId.getStackName()); + hostLevelParams.put(KeyNames.STACK_VERSION, stackId.getStackVersion()); + } - AmbariMetaInfo ambariMetaInfo = ambariMetainfoProvider.get(); + JsonObject rootJsonObject = new JsonObject(); + JsonArray repositories = new JsonArray(); - long serviceGroupId = component.getServiceGroupId(); - ServiceGroup serviceGroup = cluster.getServiceGroup(serviceGroupId); - long mpackId = serviceGroup.getMpackId(); - MpackEntity mpackEntity = mpackDAO.findById(mpackId); - Mpack mpack = ambariMetaInfo.getMpack(mpackId); + String hostOsFamily = cluster.getHost(hostName).getOsFamily(); + for (RepoOsEntity operatingSystemEntity : repositoryVersion.getRepoOsEntities()) { + if (operatingSystemEntity.getFamily().equals(hostOsFamily)) { + for (RepoDefinitionEntity repositoryEntity : operatingSystemEntity.getRepoDefinitionEntities()) { + JsonObject repositoryInfo = new JsonObject(); + repositoryInfo.addProperty("base_url", repositoryEntity.getBaseUrl()); + repositoryInfo.addProperty("repo_name", repositoryEntity.getRepoName()); + repositoryInfo.addProperty("repo_id", repositoryEntity.getRepoID()); - RepoOsEntity osEntity = getOSEntityForHost(mpackEntity, host); - return getCommandRepository(mpack, osEntity); + repositories.add(repositoryInfo); + } + rootJsonObject.add("repositories", repositories); + } + } + hostLevelParams.put(KeyNames.REPO_INFO, rootJsonObject.toString()); + } + + + /** + * Get repository info given a cluster and host. + * + * @param cluster the cluster + * @param host the host + * + * @return the repo info + * + * @deprecated use {@link #getCommandRepository(Cluster, ServiceComponent, Host)} instead.
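For reference, the repo info that addRepoInfoToHostLevelParams assembles above is stored under KeyNames.REPO_INFO as JSON of roughly this shape (values are illustrative placeholders):

    {
      "repositories": [
        {
          "base_url": "http://repo.example.com/hdp/centos7/2.x/updates/2.6.0.0",
          "repo_name": "HDP",
          "repo_id": "HDP-2.6"
        }
      ]
    }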
+ * @throws SystemException if the repository information can not be obtained + */ + @Deprecated + public String getRepoInfo(Cluster cluster, ServiceComponent component, Host host) throws SystemException { + final JsonArray jsonList = getBaseUrls(cluster, component, host); + final RepositoryVersionEntity rve = getRepositoryVersionEntity(cluster, component); + + if (null == rve || null == jsonList) { + return ""; + } + + final JsonArray result = new JsonArray(); + + for (JsonElement e : jsonList) { + JsonObject obj = e.getAsJsonObject(); + + String repoId = obj.has("repoId") ? obj.get("repoId").getAsString() : null; + String repoName = obj.has("repoName") ? obj.get("repoName").getAsString() : null; + String baseUrl = obj.has("baseUrl") ? obj.get("baseUrl").getAsString() : null; + String osType = obj.has("osType") ? obj.get("osType").getAsString() : null; + + if (null == repoId || null == baseUrl || null == osType || null == repoName) { + continue; + } + + for (RepoOsEntity ose : rve.getRepoOsEntities()) { + if (ose.getFamily().equals(osType) && ose.isAmbariManaged()) { + for (RepoDefinitionEntity re : ose.getRepoDefinitionEntities()) { + if (re.getRepoName().equals(repoName) && + !re.getBaseUrl().equals(baseUrl)) { + obj.addProperty("baseUrl", re.getBaseUrl()); + } + } + result.add(e); + } + } + } + return result.toString(); + } + + + /** + * Executed by two different representations of repos. When we are comfortable with the new + * implementation, this may be removed and called inline in {@link #getCommandRepository(Cluster, ServiceComponent, Host)} + * + * @param cluster the cluster to isolate the stack + * @param component the component + * @param host used to resolve the family for the repositories + * @return JsonArray the type as defined by the supplied {@code function}. + * @throws SystemException + */ + @Deprecated + private JsonArray getBaseUrls(Cluster cluster, ServiceComponent component, Host host) throws SystemException { + + String hostOsType = host.getOsType(); + String hostOsFamily = host.getOsFamily(); + String hostName = host.getHostName(); + + StackId stackId = component.getDesiredStackId(); + Map> repos; + + try { + repos = ami.get().getRepository(stackId.getStackName(), stackId.getStackVersion()); + }catch (AmbariException e) { + throw new SystemException("Unhandled exception", e); + } + + String family = os_family.get().find(hostOsType); + if (null == family) { + family = hostOsFamily; + } + + final List repoInfoList; + + // !!! check for the most specific first + if (repos.containsKey(hostOsType)) { + repoInfoList = repos.get(hostOsType); + } else if (null != family && repos.containsKey(family)) { + repoInfoList = repos.get(family); + } else { + repoInfoList = null; + LOG.warn("Could not retrieve repo information for host" + + ", hostname=" + hostName + + ", clusterName=" + cluster.getClusterName() + + ", stackInfo=" + stackId.getStackId()); + } + + return (null == repoInfoList) ? 
null : (JsonArray) gson.toJsonTree(repoInfoList); } + /** * Adds a command repository to the action context * @param context the context * @param osEntity the OS family */ public void addCommandRepositoryToContext(ActionExecutionContext context, - RepoOsEntity osEntity) throws SystemException { + RepoOsEntity osEntity) throws SystemException { - AmbariMetaInfo ambariMetaInfo = ambariMetainfoProvider.get(); + final RepositoryVersionEntity repoVersion = context.getRepositoryVersion(); + final CommandRepository commandRepo = getCommandRepository(repoVersion, osEntity); - try { - Mpack mpack = context.getMpack(); - if (null == mpack) { + ClusterVersionSummary summary = null; + + if (RepositoryType.STANDARD != repoVersion.getType()) { + try { final Cluster cluster = clusters.get().getCluster(context.getClusterName()); - ServiceGroup serviceGroup = cluster.getServiceGroup(context.getExpectedServiceGroupName()); - long mpackId = serviceGroup.getMpackId(); - mpack = ambariMetaInfo.getMpack(mpackId); + + VersionDefinitionXml xml = repoVersion.getRepositoryXml(); + summary = xml.getClusterSummary(cluster); + } catch (Exception e) { + LOG.warn("Could not determine repository from {}/{}. Will not pass cluster version.", repoVersion.getStackName(), repoVersion.getVersion(), e); + } + } + + final ClusterVersionSummary clusterSummary = summary; - final CommandRepository commandRepo = getCommandRepository(mpack, osEntity); - context.addVisitor(command -> { - if (null == command.getRepositoryFile()) { - command.setRepositoryFile(commandRepo); + context.addVisitor(command -> { + if (null == command.getRepositoryFile()) { + command.setRepositoryFile(commandRepo); + } + + if (null != clusterSummary) { + Map params = command.getRoleParameters(); + if (null == params) { + params = new HashMap<>(); + command.setRoleParameters(params); } - }); - } catch (AmbariException ambariException) { - throw new SystemException(ambariException.getMessage(), ambariException); - } + params.put(KeyNames.CLUSTER_VERSION_SUMMARY, clusterSummary); + } + + }); } + + } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java index afde94a3b8c..519bcc60de5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java @@ -18,6 +18,7 @@ package org.apache.ambari.server.state.svccomphost; +import java.text.MessageFormat; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -70,6 +71,7 @@ import org.apache.ambari.server.state.ServiceComponentHostEventType; import org.apache.ambari.server.state.ServiceGroup; import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.state.State; import org.apache.ambari.server.state.UpgradeState; import org.apache.ambari.server.state.alert.AlertDefinitionHash; @@ -788,7 +790,6 @@ public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent, stateEntity.setServiceGroupId(serviceComponent.getServiceGroupId()); stateEntity.setServiceId(serviceComponent.getServiceId()); stateEntity.setComponentName(serviceComponent.getName()); - stateEntity.setComponentType(serviceComponent.getType()); stateEntity.setVersion(State.UNKNOWN.toString()); stateEntity.setHostEntity(hostEntity); stateEntity.setCurrentState(stateMachine.getCurrentState()); @@ -797,8
+798,6 @@ public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent, HostComponentDesiredStateEntity desiredStateEntity = new HostComponentDesiredStateEntity(); desiredStateEntity.setClusterId(serviceComponent.getClusterId()); desiredStateEntity.setComponentName(serviceComponent.getName()); - desiredStateEntity.setComponentType(serviceComponent.getType()); - desiredStateEntity.setServiceGroupId(serviceComponent.getServiceGroupId()); desiredStateEntity.setServiceId(serviceComponent.getServiceId()); desiredStateEntity.setHostEntity(hostEntity); desiredStateEntity.setDesiredState(State.INIT); @@ -996,21 +995,11 @@ public void handleEvent(ServiceComponentHostEvent event) } } - @Override - public Long getServiceComponentId() { - return serviceComponent.getId(); - } - @Override public String getServiceComponentName() { return serviceComponent.getName(); } - @Override - public String getServiceComponentType() { - return serviceComponent.getType(); - } - @Override public String getHostName() { return host.getHostName(); @@ -1196,8 +1185,6 @@ public ServiceComponentHostResponse convertToResponse(Map e.printStackTrace(); } String serviceComponentName = serviceComponent.getName(); - String serviceComponentType = serviceComponent.getType(); - Long hostComponentId = getHostComponentId(); String hostName = getHostName(); String publicHostName = hostEntity.getPublicHostName(); @@ -1225,8 +1212,8 @@ public ServiceComponentHostResponse convertToResponse(Map ServiceComponentHostResponse r = new ServiceComponentHostResponse(clusterId, clusterName, service.getServiceGroupId(), service.getServiceGroupName(), service.getServiceId(), service.getName(), service.getServiceType(), - hostComponentId, serviceComponentName, serviceComponentType, displayName, hostName, publicHostName, state, - getVersion(), desiredState, desiredStackId, desiredRepositoryVersion, componentAdminState); + hostComponentId, serviceComponentName, displayName, hostName, publicHostName, state, getVersion(), + desiredState, desiredStackId, desiredRepositoryVersion, componentAdminState); r.setActualConfigs(actualConfigs); r.setUpgradeState(upgradeState); @@ -1259,8 +1246,6 @@ public void debugDump(StringBuilder sb) { sb.append("ServiceComponentHost={ hostname=").append(getHostName()) .append(", serviceComponentName=") .append(serviceComponent.getName()) - .append(", serviceComponentType=") - .append(serviceComponent.getType()) .append(", clusterName=") .append(serviceComponent.getClusterName()) .append(", serviceName=") @@ -1279,10 +1264,9 @@ public void debugDump(StringBuilder sb) { @Transactional void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity, HostComponentDesiredStateEntity desiredStateEntity) { - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = null; - serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( - serviceComponent.getClusterId(), serviceComponent.getServiceGroupId(), serviceComponent.getServiceId(), - serviceComponent.getName(), serviceComponent.getType()); + ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + serviceComponent.getClusterId(), serviceComponent.getServiceGroupId(), serviceComponent.getServiceId(), + serviceComponent.getName()); desiredStateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity); desiredStateEntity.setHostEntity(hostEntity); @@ -1290,12 +1274,8 @@ void persistEntities(HostEntity hostEntity, 
HostComponentStateEntity stateEntity stateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity); stateEntity.setHostEntity(hostEntity); - hostComponentDesiredStateDAO.create(desiredStateEntity); - stateEntity.setHostComponentDesiredStateEntity(desiredStateEntity); hostComponentStateDAO.create(stateEntity); - - serviceComponentDesiredStateEntity.getHostComponentStateEntities().add( - stateEntity); + hostComponentDesiredStateDAO.create(desiredStateEntity); serviceComponentDesiredStateEntity.getHostComponentDesiredStateEntities().add( desiredStateEntity); @@ -1485,6 +1465,28 @@ public void setRestartRequired(boolean restartRequired) { } } + @Transactional + RepositoryVersionEntity createRepositoryVersion(String version, final StackId stackId, final StackInfo stackInfo) throws AmbariException { + // During an Ambari Upgrade from 1.7.0 -> 2.0.0, the Repo Version will not exist, so bootstrap it. + LOG.info("Creating new repository version " + stackId.getStackName() + "-" + version); + + StackEntity stackEntity = stackDAO.find(stackId.getStackName(), + stackId.getStackVersion()); + + // Ensure that the version provided is part of the Stack. + // E.g., version 2.3.0.0 is part of HDP 2.3, so is 2.3.0.0-1234 + if (null == version) { + throw new AmbariException(MessageFormat.format("Cannot create Repository Version for Stack {0}-{1} if the version is empty", + stackId.getStackName(), stackId.getStackVersion())); + } + + return repositoryVersionDAO.create( + stackEntity, + version, + stackId.getStackName() + "-" + version, + repositoryVersionHelper.createRepoOsEntities(stackInfo.getRepositories())); + } + /** * {@inheritDoc} */ diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java index f298b4b1d43..6fbb59e810a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java @@ -19,13 +19,13 @@ package org.apache.ambari.server.topology; import static java.util.stream.Collectors.joining; -import static java.util.stream.Collectors.toCollection; import static java.util.stream.Collectors.toSet; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -34,8 +34,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; -import java.util.function.Function; -import java.util.stream.Stream; import javax.annotation.Nullable; import javax.inject.Inject; @@ -45,7 +43,6 @@ import org.apache.ambari.server.DuplicateResourceException; import org.apache.ambari.server.Role; import org.apache.ambari.server.RoleCommand; -import org.apache.ambari.server.StackAccessException; import org.apache.ambari.server.actionmanager.HostRoleCommand; import org.apache.ambari.server.actionmanager.HostRoleCommandFactory; import org.apache.ambari.server.actionmanager.HostRoleStatus; @@ -71,7 +68,6 @@ import org.apache.ambari.server.controller.internal.ServiceGroupDependencyResourceProvider; import org.apache.ambari.server.controller.internal.ServiceGroupResourceProvider; import org.apache.ambari.server.controller.internal.ServiceResourceProvider; -import org.apache.ambari.server.controller.internal.Stack; import 
org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.controller.internal.VersionDefinitionResourceProvider; import org.apache.ambari.server.controller.predicate.EqualsPredicate; @@ -84,7 +80,6 @@ import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.security.authorization.AuthorizationException; -import org.apache.ambari.server.stack.NoSuchStackException; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Config; @@ -92,18 +87,15 @@ import org.apache.ambari.server.state.ConfigHelper; import org.apache.ambari.server.state.DesiredConfig; import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.PropertyInfo; import org.apache.ambari.server.state.RepositoryType; import org.apache.ambari.server.state.SecurityType; import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.configgroup.ConfigGroup; import org.apache.ambari.server.utils.RetryHelper; -import org.apache.commons.lang3.tuple.Pair; import org.apache.directory.api.util.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; @@ -129,9 +121,6 @@ public enum TaskType {INSTALL, START} @Inject ConfigFactory configFactory; - @Inject - StackFactory stackFactory; - @Inject RepositoryVersionDAO repositoryVersionDAO; @@ -226,7 +215,7 @@ public void createAmbariResources(ClusterTopology topology, String clusterName, String repoVersionString, Long repoVersionId) { Map repoVersionByStack = new HashMap<>(); - Set stackIds = topology.getStackIds(); + Set stackIds = topology.getBlueprint().getStackIds(); for (StackId stackId : stackIds) { RepositoryVersionEntity repoVersion = null; if (stackIds.size() == 1) { @@ -245,7 +234,8 @@ public void createAmbariResources(ClusterTopology topology, String clusterName, } } - createAmbariClusterResource(clusterName, topology.getStackIds(), securityType); + StackId stackId = Iterables.getFirst(topology.getBlueprint().getStackIds(), null); + createAmbariClusterResource(clusterName, stackId, securityType); createAmbariServiceAndComponentResources(topology, clusterName, repoVersionByStack); } @@ -329,17 +319,19 @@ private RepositoryVersionEntity findSpecifiedRepo(String repoVersionString, Long return repoVersion; } - private void createAmbariClusterResource(String clusterName, Set stackIds, SecurityType securityType) { - String stackInfo = stackIds.iterator().next().toString(); // temporary + public void createAmbariClusterResource(String clusterName, StackId stackId, SecurityType securityType) { + String stackInfo = stackId.toString(); final ClusterRequest clusterRequest = new ClusterRequest(null, clusterName, null, securityType, stackInfo, null); try { - RetryHelper.executeWithRetry(() -> { - getController().createCluster(clusterRequest); - return null; + RetryHelper.executeWithRetry(new Callable() { + @Override + public Object call() throws Exception { + getController().createCluster(clusterRequest); + return null; + } }); - addDefaultClusterSettings(clusterName); } catch (AmbariException e) { LOG.error("Failed to create Cluster resource: ", e); if (e.getCause() instanceof DuplicateResourceException) { @@ -350,31 +342,37 @@ private void 
createAmbariClusterResource(String clusterName, Set stackI } } - // FIXME temporarily add default cluster settings -- should be provided by ClusterImpl itself - private void addDefaultClusterSettings(String clusterName) throws AmbariException { - Cluster cluster = getController().getClusters().getCluster(clusterName); - for (PropertyInfo p : getController().getAmbariMetaInfo().getClusterProperties()) { - cluster.addClusterSetting(p.getName(), p.getValue()); + public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName, Map repoVersionByStack) { + Set serviceGroups = Sets.newHashSet(DEFAULT_SERVICE_GROUP_NAME); + Collection services = topology.getBlueprint().getServices(); + + try { + Cluster cluster = getController().getClusters().getCluster(clusterName); + serviceGroups.removeAll(cluster.getServiceGroups().keySet()); + services.removeAll(cluster.getServices().keySet()); + } catch (AmbariException e) { + throw new RuntimeException("Failed to persist service and component resources: " + e, e); } - } - private void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName, Map repoVersionByStack) { - Set serviceGroupRequests = topology.getComponents() - .map(c -> new ServiceGroupRequest(clusterName, c.effectiveServiceGroupName(), c.stackId().getStackId())) + Set serviceGroupRequests = serviceGroups.stream() + .map(serviceGroupName -> new ServiceGroupRequest(clusterName, serviceGroupName, Iterables.getFirst(topology.getBlueprint().getStackIds(), null).getStackId())) .collect(toSet()); - Set serviceRequests = topology.getComponents() - .map(c -> new ServiceRequest( - clusterName, c.effectiveServiceGroupName(), c.effectiveServiceName(), c.serviceType(), repoVersionByStack.get(c.stackId()), null, - topology.getSetting().getCredentialStoreEnabled(c.effectiveServiceName()), // FIXME settings by service type or name? - c.stackId() - )) - .collect(toSet()); + Set serviceRequests = new HashSet<>(); + Set componentRequests = new HashSet<>(); + for (String service : services) { + String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service); + StackId stackId = Iterables.getOnlyElement(topology.getBlueprint().getStackIdsForService(service)); // FIXME temporarily assume each service is defined in only one mpack + Long repositoryVersionId = repoVersionByStack.get(stackId); + serviceRequests.add(new ServiceRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, service, + repositoryVersionId, null, credentialStoreEnabled, stackId + )); - Set componentRequests = topology.getComponents() - .map(c -> new ServiceComponentRequest(clusterName, c.effectiveServiceGroupName(), c.effectiveServiceName(), c.componentName(), c.componentName(), - topology.getSetting().getRecoveryEnabled(c.effectiveServiceName(), c.componentName()))) // FIXME settings by service type or name? 
- .collect(toSet()); + for (String component : topology.getBlueprint().getComponentNames(service)) { + String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component); + componentRequests.add(new ServiceComponentRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, component, null, recoveryEnabled)); + } + } try { if (!serviceGroupRequests.isEmpty()) { @@ -408,7 +406,7 @@ private void createAmbariServiceAndComponentResources(ClusterTopology topology, } } - public void createAmbariHostResources(long clusterId, String hostName, Stream components) { + public void createAmbariHostResources(long clusterId, String hostName, Map> components) { Host host; try { host = getController().getClusters().getHost(hostName); @@ -418,7 +416,7 @@ public void createAmbariHostResources(long clusterId, String hostName, Stream requests = components - .filter(component -> !component.componentName().equals(RootComponent.AMBARI_SERVER.name())) - .map(component -> new ServiceComponentHostRequest(clusterName, component.effectiveServiceGroupName(), component.effectiveServiceName(), component.componentName(), component.componentName(),hostName, null)) - .collect(toSet()); + final Set requests = new HashSet<>(); + for (Map.Entry> entry : components.entrySet()) { + String service = entry.getKey(); + for (String component : entry.getValue()) { + //todo: handle this in a generic manner. These checks are all over the code + try { + if (cluster.getService(service) != null && !component.equals(RootComponent.AMBARI_SERVER.name())) { + requests.add(new ServiceComponentHostRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, component, hostName, null)); + } + } catch(AmbariException se) { + LOG.warn("Service already deleted from cluster: {}", service); + } + } + } try { - RetryHelper.executeWithRetry(() -> { - getController().createHostComponents(requests); - return null; + RetryHelper.executeWithRetry(new Callable() { + @Override + public Object call() throws Exception { + getController().createHostComponents(requests); + return null; + } }); } catch (AmbariException e) { LOG.error("Unable to create host component resource for host {}", hostName, e); @@ -480,7 +491,7 @@ public static void init(HostRoleCommandFactory factory) { } public void registerHostWithConfigGroup(final String hostName, final ClusterTopology topology, final String groupName) { - String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprintName(), groupName); + String qualifiedGroupName = getConfigurationGroupName(topology.getBlueprint().getName(), groupName); Lock configGroupLock = configGroupCreateLock.get(qualifiedGroupName); @@ -749,7 +760,7 @@ private boolean addHostToExistingConfigGroups(String hostName, ClusterTopology t */ private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException { Map> groupConfigs = new HashMap<>(); - StackDefinition stack = topology.getStack(); + StackDefinition stack = topology.getBlueprint().getStack(); // get the host-group config with cluster creation template overrides Configuration topologyHostGroupConfig = topology. 
@@ -763,7 +774,7 @@ private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String for (Map.Entry> entry : userProvidedGroupProperties.entrySet()) { String type = entry.getKey(); String service = stack.getServicesForConfigType(type) - .filter(each -> topology.getServices().contains(each)) + .filter(each -> topology.getBlueprint().getServices().contains(each)) .findFirst() // TODO check if this is required at all (might be handled by the "orphan" removal) // TODO move this validation earlier @@ -779,7 +790,7 @@ private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String serviceConfigs.put(type, config); } - String bpName = topology.getBlueprintName(); + String bpName = topology.getBlueprint().getName(); for (Map.Entry> entry : groupConfigs.entrySet()) { String service = entry.getKey(); Map serviceConfigs = entry.getValue(); @@ -823,50 +834,6 @@ public boolean apply(@Nullable String groupHost) { } } - public StackDefinition composeStacks(Set stackIds) { - Set stacks = stackIds.stream() - .map(this::createStack) - .collect(toSet()); - StackDefinition composite = StackDefinition.of(stacks); - - // temporary check - verifyStackDefinitionsAreDisjoint(composite.getServices().stream(), "Service", composite::getStacksForService); - verifyStackDefinitionsAreDisjoint(composite.getComponents().stream(), "Component", composite::getStacksForComponent); - - return composite; - } - - /** - * Verify that each item in items is defined by only one stack. - * - * @param items the items to check - * @param type string description of the type of items (eg. "Service", or "Component") - * @param lookup a function to find the set of stacks that an item belongs to - * @throws IllegalArgumentException if some items are defined in multiple stacks - */ - static void verifyStackDefinitionsAreDisjoint(Stream items, String type, Function> lookup) { - Set>> definedInMultipleStacks = items - .map(s -> Pair.of(s, lookup.apply(s))) - .filter(p -> p.getRight().size() > 1) - .collect(toCollection(TreeSet::new)); - - if (!definedInMultipleStacks.isEmpty()) { - String msg = definedInMultipleStacks.stream() - .map(p -> String.format("%s %s is defined in multiple stacks: %s", type, p.getLeft(), Joiner.on(", ").join(p.getRight()))) - .collect(joining("\n")); - LOG.error(msg); - throw new IllegalArgumentException(msg); - } - } - - protected Stack createStack(StackId stackId) { - try { - return stackFactory.createStack(stackId); - } catch (StackAccessException e) { - throw new NoSuchStackException(stackId); - } - } - /** * Get a config group name based on a bp and host group. * diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java index 64456247601..04e004e5c55 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Blueprint.java @@ -19,11 +19,11 @@ package org.apache.ambari.server.topology; import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Set; -import javax.annotation.Nonnull; - +import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.orm.entities.BlueprintEntity; import org.apache.ambari.server.state.StackId; @@ -73,11 +73,80 @@ public interface Blueprint { */ Setting getSetting(); + /** + * Get all of the services represented in the blueprint. 
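As an illustrative sketch (not part of the patch), the accessors being restored to this interface are chained by AmbariContext above roughly as follows; getOnlyElement reflects the temporary one-mpack-per-service assumption noted there:

    for (String service : blueprint.getServices()) {
      StackId stackId = Iterables.getOnlyElement(blueprint.getStackIdsForService(service));
      String credentialStoreEnabled = blueprint.getCredentialStoreEnabled(service);
      for (String component : blueprint.getComponentNames(service)) {
        String recoveryEnabled = blueprint.getRecoveryEnabled(service, component);
        // ... build the ServiceRequest / ServiceComponentRequest for this pair ...
      }
    }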
+ * + * @return collection of all represented service names + */ + Collection<String> getServices(); + + /** + * Get the components that are included in the blueprint for the specified service. + * + * @param service service name + * + * @return collection of components for the service. Will not return null. + */ + Collection<Component> getComponents(String service); + + /** + * Get the names of the components that are included in the blueprint for the specified service. + * + * @param service service name + * + * @return collection of component names for the service. Will not return null. + */ + @Deprecated + Collection<String> getComponentNames(String service); + + /** + * Get whether a component is enabled for auto start. + * + * @param serviceName - Service name. + * @param componentName - Component name. + * + * @return null if value is not specified; true or false if specified. + */ + String getRecoveryEnabled(String serviceName, String componentName); + + /** + * Get whether a service is enabled for credential store use. + * + * @param serviceName - Service name. + * + * @return null if value is not specified; true or false if specified. + */ + String getCredentialStoreEnabled(String serviceName); + + /** + * Check if auto skip failure is enabled. + * @return true if enabled, otherwise false. + */ + boolean shouldSkipFailure(); + + /** + * Get the stack associated with the blueprint. + * For mpack-based installation this is a composite stack + * that provides a single unified view of all underlying mpacks, + * but does not have any identifier. + * + * @return associated stack + */ + StackDefinition getStack(); + /** * @return the set of stack (mpack) IDs associated with the blueprint */ Set<StackId> getStackIds(); + /** + * Look up the stacks that define a service. + * To be used only after checking that services map to + * @param service the name of the service as defined in the stack (mpack), eg. ZOOKEEPER + * @return the IDs of the stacks that define the given service + */ + Set<StackId> getStackIdsForService(String service); + /** * Get the mpacks associated with the blueprint. * @@ -85,6 +154,16 @@ public interface Blueprint { */ Collection<MpackInstance> getMpacks(); + /** + * Get the host groups which contain components for the specified service. + * + * @param service service name + * + * @return collection of host groups containing components for the specified service; + * will not return null + */ + Collection<HostGroup> getHostGroupsForService(String service); + /** * Get the host groups which contain the given component. * @@ -94,9 +173,13 @@ public interface Blueprint { */ Collection<HostGroup> getHostGroupsForComponent(String component); - @Nonnull SecurityConfiguration getSecurity(); + /** + * A config type is valid if there are services related to it, except for cluster-env and global, which are always accepted. + */ + boolean isValidConfigType(String configType); + /** * Obtain the blueprint as an entity. * @@ -104,9 +187,6 @@ public interface Blueprint { */ BlueprintEntity toEntity(); - /** - * Add the kerberos client to all host groups in the blueprint.
- */ - boolean ensureKerberosClientIsPresent(); + List getRepositorySettings(); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequest.java deleted file mode 100644 index 22d4bab2e07..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequest.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - -import javax.annotation.Nonnull; - -import org.apache.ambari.server.controller.internal.ProvisionAction; -import org.apache.ambari.server.controller.internal.ProvisionClusterRequest; -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.orm.entities.BlueprintEntity; -import org.apache.ambari.server.state.SecurityType; -import org.apache.ambari.server.state.StackId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; - -/** - * I am the Blueprint and ProvisionClusterRequest combined. - */ -public class BlueprintBasedClusterProvisionRequest implements Blueprint, ProvisionRequest { - - private static final Logger LOG = LoggerFactory.getLogger(BlueprintBasedClusterProvisionRequest.class); - - private final Blueprint blueprint; - private final ProvisionClusterRequest request; - private final Set stackIds; - private final StackDefinition stack; - private final Set mpacks; - private final SecurityConfiguration securityConfiguration; - - public BlueprintBasedClusterProvisionRequest(AmbariContext ambariContext, SecurityConfigurationFactory securityConfigurationFactory, Blueprint blueprint, ProvisionClusterRequest request) { - this.blueprint = blueprint; - this.request = request; - - stackIds = ImmutableSet.copyOf(Sets.union(blueprint.getStackIds(), request.getStackIds())); - stack = ambariContext.composeStacks(stackIds); - mpacks = ImmutableSet.builder(). - addAll(blueprint.getMpacks()). 
- addAll(request.getMpacks()).build(); - - securityConfiguration = processSecurityConfiguration(securityConfigurationFactory); - - if (securityConfiguration.getType() == SecurityType.KERBEROS) { - ensureKerberosClientIsPresent(); - } - } - - @Override - public String getName() { - return blueprint.getName(); - } - - @Override - public Map getHostGroups() { - return blueprint.getHostGroups(); - } - - @Override - public HostGroup getHostGroup(String name) { - return blueprint.getHostGroup(name); - } - - @Override - public Configuration getConfiguration() { - return request.getConfiguration(); - } - - @Override - public Setting getSetting() { - return blueprint.getSetting(); - } - - @Override - public Set getStackIds() { - return stackIds; - } - - @Override - public Collection getMpacks() { - return mpacks; - } - - @Override - public Collection getHostGroupsForComponent(String component) { - return blueprint.getHostGroupsForComponent(component); - } - - @Nonnull - @Override - public SecurityConfiguration getSecurity() { - return securityConfiguration; - } - - @Override - public BlueprintEntity toEntity() { - throw new UnsupportedOperationException(); - } - - @Override - public Long getClusterId() { - return null; - } - - @Override - public Type getType() { - return Type.PROVISION; - } - - @Override - public Blueprint getBlueprint() { - return blueprint; - } - - @Override - public Map getHostGroupInfo() { - return request.getHostGroupInfo(); - } - - @Override - public String getDescription() { - return request.getDescription(); - } - - public String getDefaultPassword() { - return request.getDefaultPassword(); - } - - public ConfigRecommendationStrategy getConfigRecommendationStrategy() { - return request.getConfigRecommendationStrategy(); - } - - public ProvisionAction getProvisionAction() { - return request.getProvisionAction(); - } - - public StackDefinition getStack() { - return stack; - } - - public Map> getServicesByMpack() { - Map> result = new HashMap<>(); - for (MpackInstance mpack : mpacks) { - Map services = mpack.getServiceInstances().stream() - .collect(toMap(ServiceInstance::getName, Function.identity())); - result.put(mpack.getMpackName(), services); - } - return result; - } - - /** - * @return service instances defined in the topology, mapped by service name, - * whose name is unique across all mpacks. - */ - public Map getUniqueServices() { - Map map = mpacks.stream() - .flatMap(mpack -> mpack.getServiceInstances().stream()) - .collect(toMap(ServiceInstance::getName, Function.identity(), (s1, s2) -> null)); - map.entrySet().removeIf(e -> e.getValue() == null); // remove non-unique names mapped to null - return map; - } - - /** - * Retrieve security info from Blueprint if missing from Cluster Template request. 
- */ - private SecurityConfiguration processSecurityConfiguration(SecurityConfigurationFactory securityConfigurationFactory) { - SecurityConfiguration blueprintSecurity = blueprint.getSecurity(); - SecurityConfiguration requestSecurity = request.getSecurityConfiguration(); - - if (requestSecurity == null) { - LOG.debug("There's no security configuration in the request, retrieving it from the associated blueprint"); - requestSecurity = blueprintSecurity; - if (requestSecurity.getType() == SecurityType.KERBEROS && requestSecurity.getDescriptorReference() != null) { - requestSecurity = securityConfigurationFactory.loadSecurityConfigurationByReference(requestSecurity.getDescriptorReference()); - } - } else if (requestSecurity.getType() == SecurityType.NONE && blueprintSecurity.getType() == SecurityType.KERBEROS) { - throw new IllegalArgumentException("Setting security to NONE is not allowed as security type in blueprint is set to KERBEROS!"); - } - - return requestSecurity; - } - - @Override - public boolean ensureKerberosClientIsPresent() { - return blueprint.ensureKerberosClientIsPresent(); - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java index 0ac76df322a..4386ec6919d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintFactory.java @@ -40,17 +40,22 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Stream; -import javax.inject.Inject; -import javax.inject.Provider; - +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.ObjectNotFoundException; +import org.apache.ambari.server.controller.AmbariManagementController; +import org.apache.ambari.server.controller.AmbariServer; +import org.apache.ambari.server.controller.RootComponent; import org.apache.ambari.server.controller.internal.ProvisionAction; +import org.apache.ambari.server.controller.internal.Stack; +import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.orm.dao.BlueprintDAO; import org.apache.ambari.server.orm.entities.BlueprintEntity; import org.apache.ambari.server.stack.NoSuchStackException; @@ -63,6 +68,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Joiner; +import com.google.inject.Inject; /** * Create a Blueprint instance. 
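Note on the deletion above: processSecurityConfiguration() encoded a small precedence rule. The request's security configuration wins, the blueprint's fills the gap, and an explicit NONE may not downgrade a KERBEROS blueprint. A minimal standalone sketch of that rule (enum and names simplified; an illustration of the removed logic, not the removed code itself):

    public class SecurityPrecedenceSketch {

      enum SecurityType { NONE, KERBEROS }

      // Mirrors the fallback logic of the deleted processSecurityConfiguration():
      // the request wins, the blueprint fills the gap, and NONE may not
      // override a KERBEROS blueprint.
      static SecurityType resolve(SecurityType fromRequest, SecurityType fromBlueprint) {
        if (fromRequest == null) {
          return fromBlueprint;
        }
        if (fromRequest == SecurityType.NONE && fromBlueprint == SecurityType.KERBEROS) {
          throw new IllegalArgumentException(
              "Setting security to NONE is not allowed as security type in blueprint is set to KERBEROS!");
        }
        return fromRequest;
      }

      public static void main(String[] args) {
        System.out.println(resolve(null, SecurityType.KERBEROS));               // KERBEROS
        System.out.println(resolve(SecurityType.KERBEROS, SecurityType.NONE)); // KERBEROS
      }
    }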
@@ -71,21 +77,27 @@ public class BlueprintFactory { private static final Logger LOG = LoggerFactory.getLogger(BlueprintFactory.class); - private final Provider blueprintDAO; + private static BlueprintDAO blueprintDAO; + private final ConfigurationFactory configFactory = new ConfigurationFactory(); + private final StackFactory stackFactory; - @Inject - public BlueprintFactory(Provider blueprintDAO) { - this.blueprintDAO = blueprintDAO; + public BlueprintFactory() { + this(new DefaultStackFactory()); + } + + protected BlueprintFactory(StackFactory stackFactory) { + this.stackFactory = stackFactory; } public Blueprint getBlueprint(String blueprintName) throws NoSuchStackException { - BlueprintEntity entity = blueprintDAO.get().findByName(blueprintName); + BlueprintEntity entity = blueprintDAO.findByName(blueprintName); if (entity != null) { Set stackIds = entity.getMpackInstances().stream() .map(m -> new StackId(m.getMpackName(), m.getMpackVersion())) .collect(toSet()); - return new BlueprintImpl(entity, stackIds); + StackDefinition stack = composeStacks(stackIds); + return new BlueprintImpl(entity, stack, stackIds); } return null; } @@ -108,25 +120,22 @@ public Blueprint createBlueprint(Map properties, SecurityConfigu Collection mpackInstances = createMpackInstances(properties); if (mpackInstances.isEmpty()) { - Optional stackId = getStackId(properties); - if (stackId.isPresent()) { - String stackName = stackId.get().getStackName(); - String stackVersion = stackId.get().getStackVersion(); - mpackInstances = Collections.singleton(new MpackInstance(stackName, stackVersion, null, null, Configuration.createEmpty())); - } + StackId stackId = getStackId(properties); + mpackInstances = Collections.singleton(new MpackInstance(stackId.getStackName(), stackId.getStackVersion(), null, null, Configuration.createEmpty())); } Set stackIds = mpackInstances.stream() - .map(MpackInstance::getStackId) + .map(m -> new StackId(m.getMpackName(), m.getMpackVersion())) .collect(toSet()); - Collection hostGroups = processHostGroups(properties); + StackDefinition stack = composeStacks(stackIds); + Collection hostGroups = processHostGroups(name, stack, properties); Configuration configuration = configFactory.getConfiguration((Collection>) properties.get(CONFIGURATION_PROPERTY_ID)); Setting setting = SettingFactory.getSetting((Collection>) properties.get(SETTING_PROPERTY_ID)); - return new BlueprintImpl(name, hostGroups, stackIds, mpackInstances, configuration, securityConfiguration, setting); + return new BlueprintImpl(name, hostGroups, stack, stackIds, mpackInstances, configuration, securityConfiguration, setting); } - public static Collection createMpackInstances(Map properties) throws NoSuchStackException { + private Collection createMpackInstances(Map properties) throws NoSuchStackException { if (properties.containsKey(MPACK_INSTANCES_PROPERTY_ID)) { ObjectMapper mapper = new ObjectMapper(); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); @@ -142,12 +151,23 @@ public static Collection createMpackInstances(Map } } - private static Optional getStackId(Map properties) throws NoSuchStackException { - Object stackName = properties.get(STACK_NAME_PROPERTY_ID); - Object stackVersion = properties.get(STACK_VERSION_PROPERTY_ID); - return stackName != null && stackVersion != null - ? 
Optional.of(new StackId(stackName.toString(), stackVersion.toString())) - : Optional.empty(); + private static StackId getStackId(Map properties) throws NoSuchStackException { + String stackName = String.valueOf(properties.get(STACK_NAME_PROPERTY_ID)); + String stackVersion = String.valueOf(properties.get(STACK_VERSION_PROPERTY_ID)); + return new StackId(stackName, stackVersion); + } + + private StackDefinition composeStacks(Set stackIds) { + Set stacks = stackIds.stream() + .map(this::createStack) + .collect(toSet()); + StackDefinition composite = StackDefinition.of(stacks); + + // temporary check + verifyStackDefinitionsAreDisjoint(composite.getServices().stream(), "Service", composite::getStacksForService); + verifyStackDefinitionsAreDisjoint(composite.getComponents().stream(), "Component", composite::getStacksForComponent); + + return composite; } /** @@ -173,10 +193,23 @@ static void verifyStackDefinitionsAreDisjoint(Stream items, String type, } } + protected Stack createStack(StackId stackId) { + try { + //todo: don't pass in controller + return stackFactory.createStack(stackId, AmbariServer.getController()); + } catch (ObjectNotFoundException e) { + throw new NoSuchStackException(stackId); + } catch (AmbariException e) { + // todo + throw new RuntimeException( + String.format("An error occurred parsing the stack information for %s", stackId) , e); + } + } + //todo: Move logic to HostGroupImpl @SuppressWarnings("unchecked") - private Collection processHostGroups(Map properties) { - Set> hostGroupProps = (Set>) + private Collection processHostGroups(String bpName, StackDefinition stack, Map properties) { + Set> hostGroupProps = (HashSet>) properties.get(HOST_GROUP_PROPERTY_ID); if (hostGroupProps == null || hostGroupProps.isEmpty()) { @@ -184,43 +217,49 @@ private Collection processHostGroups(Map properties) } Collection hostGroups = new ArrayList<>(); - for (Map hostGroupProperties : hostGroupProps) { + for (HashMap hostGroupProperties : hostGroupProps) { String hostGroupName = (String) hostGroupProperties.get(HOST_GROUP_NAME_PROPERTY_ID); if (hostGroupName == null || hostGroupName.isEmpty()) { throw new IllegalArgumentException("Every host group must include a non-null 'name' property"); } - Set> componentProps = (Set>) + HashSet> componentProps = (HashSet>) hostGroupProperties.get(COMPONENT_PROPERTY_ID); Collection> configProps = (Collection>) hostGroupProperties.get(CONFIGURATION_PROPERTY_ID); - Collection components = processHostGroupComponents(hostGroupName, componentProps); + Collection components = processHostGroupComponents(stack, hostGroupName, componentProps); Configuration configuration = configFactory.getConfiguration(configProps); String cardinality = String.valueOf(hostGroupProperties.get(HOST_GROUP_CARDINALITY_PROPERTY_ID)); - HostGroup group = new HostGroupImpl(hostGroupName, components, configuration, cardinality); + HostGroup group = new HostGroupImpl(hostGroupName, bpName, stack, components, configuration, cardinality); hostGroups.add(group); } return hostGroups; } - private Collection processHostGroupComponents(String groupName, Set> componentProps) { + private Collection processHostGroupComponents(StackDefinition stack, String groupName, HashSet> componentProps) { if (componentProps == null || componentProps.isEmpty()) { throw new IllegalArgumentException("Host group '" + groupName + "' must contain at least one component"); } + Collection stackComponentNames = getAllStackComponents(stack); Collection components = new ArrayList<>(); - for (Map componentProperties 
: componentProps) { + for (HashMap componentProperties : componentProps) { String componentName = componentProperties.get(COMPONENT_NAME_PROPERTY_ID); if (componentName == null || componentName.isEmpty()) { throw new IllegalArgumentException("Host group '" + groupName + "' contains a component with no 'name' property"); } + if (! stackComponentNames.contains(componentName)) { + throw new IllegalArgumentException("The component '" + componentName + "' in host group '" + + groupName + "' is not valid for the specified stack"); + } + String mpackInstance = componentProperties.get(COMPONENT_MPACK_INSTANCE_PROPERTY); String serviceInstance = componentProperties.get(COMPONENT_SERVICE_INSTANCE_PROPERTY); //TODO, might want to add some validation here, to only accept value enum types, rwn @@ -232,4 +271,52 @@ private Collection processHostGroupComponents(String groupName, Set getAllStackComponents(StackDefinition stack) { + Collection allComponents = new HashSet<>(stack.getComponents()); + + // currently ambari server is not a recognized component + allComponents.add(RootComponent.AMBARI_SERVER.name()); + + return allComponents; + } + + + /** + * Static initialization. + * + * @param dao blueprint data access object + */ + @Inject + public static void init(BlueprintDAO dao) { + blueprintDAO = dao; + } + + /** + * Internal interface used to abstract out the process of creating the Stack object. + * + * This is used to simplify unit testing, since a new Factory can be provided to + * simulate various Stack or error conditions. + */ + interface StackFactory { + Stack createStack(StackId stackId, AmbariManagementController managementController) throws AmbariException; + } + + /** + * Default implementation of StackFactory. + * + * Calls the Stack constructor to create the Stack instance. 
+ * + */ + private static class DefaultStackFactory implements StackFactory { + @Override + public Stack createStack(StackId stackId, AmbariManagementController managementController) throws AmbariException { + return new Stack(stackId.getStackName(), stackId.getStackVersion(), managementController); + } + } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java index 47e19bafbe7..04616a72270 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImpl.java @@ -19,6 +19,8 @@ package org.apache.ambari.server.topology; +import static java.util.stream.Collectors.toList; + import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -30,6 +32,7 @@ import java.util.Set; import java.util.function.Supplier; +import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.orm.entities.BlueprintConfigEntity; import org.apache.ambari.server.orm.entities.BlueprintConfiguration; import org.apache.ambari.server.orm.entities.BlueprintEntity; @@ -40,13 +43,14 @@ import org.apache.ambari.server.orm.entities.HostGroupConfigEntity; import org.apache.ambari.server.orm.entities.HostGroupEntity; import org.apache.ambari.server.stack.NoSuchStackException; +import org.apache.ambari.server.state.ConfigHelper; import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.utils.JsonUtils; +import org.apache.commons.lang.StringUtils; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableMap; import com.google.gson.Gson; /** @@ -57,32 +61,37 @@ public class BlueprintImpl implements Blueprint { private final String name; private final Map hostGroups; private final Collection mpacks; + private final StackDefinition stack; private final Set stackIds; private final Configuration configuration; private final SecurityConfiguration security; private final Setting setting; private final List repoSettings; - public BlueprintImpl(BlueprintEntity entity, Set stackIds) throws NoSuchStackException { + public BlueprintImpl(BlueprintEntity entity, StackDefinition stack, Set stackIds) throws NoSuchStackException { name = entity.getBlueprintName(); security = entity.getSecurityType() != null ? new SecurityConfiguration(entity.getSecurityType(), entity.getSecurityDescriptorReference(), null) : SecurityConfiguration.NONE; mpacks = parseMpacks(entity); + this.stack = stack; this.stackIds = stackIds; // create config first because it is set as a parent on all host-group configs configuration = processConfiguration(entity.getConfigurations()); hostGroups = parseBlueprintHostGroups(entity); - setting = new Setting(parseSetting(entity.getSettings())); + // TODO: how to handle multiple stacks correctly? 
+ configuration.setParentConfiguration(stack.getConfiguration(getServices())); + setting = processSetting(entity.getSettings()); repoSettings = processRepoSettings(); } - public BlueprintImpl(String name, Collection<HostGroup> groups, Set<StackId> stackIds, Collection<MpackInstance> mpacks, + public BlueprintImpl(String name, Collection<HostGroup> groups, StackDefinition stack, Set<StackId> stackIds, Collection<MpackInstance> mpacks, Configuration configuration, SecurityConfiguration security, Setting setting) { this.name = name; this.mpacks = mpacks; + this.stack = stack; this.stackIds = stackIds; this.security = security != null ? security : SecurityConfiguration.NONE; @@ -91,22 +100,29 @@ public BlueprintImpl(String name, Collection<HostGroup> groups, Set<StackId> sta for (HostGroup hostGroup : groups) { hostGroups.put(hostGroup.getName(), hostGroup); } + // TODO: handle configuration from multiple stacks properly + // if the parent isn't set, the stack configuration is set as the parent this.configuration = configuration; - this.setting = setting != null ? setting : new Setting(ImmutableMap.of()); + if (configuration.getParentConfiguration() == null) { + configuration.setParentConfiguration(stack.getConfiguration(getServices())); + } + this.setting = setting; repoSettings = processRepoSettings(); } - @Override public String getName() { return name; } - @Override public Set<StackId> getStackIds() { return stackIds; } @Override + public Set<StackId> getStackIdsForService(String service) { + return stack.getStacksForService(service); + } + public SecurityConfiguration getSecurity() { return security; } @@ -133,10 +149,162 @@ public Setting getSetting() { return setting; } + /** + * Get all services represented in the blueprint. + * + * @return collection of all services provided by the topology + */ + @Override + public Collection<String> getServices() { + Collection<String> services = new HashSet<>(); + for (HostGroup group : getHostGroups().values()) { + services.addAll(group.getServices()); + } + return services; + } + + @Override + public Collection<Component> getComponents(String service) { + Collection<Component> components = new HashSet<>(); + for (HostGroup group : getHostGroupsForService(service)) { + components.addAll(group.getComponents(service)); + } + return components; + } + + @Override + @Deprecated + public Collection<String> getComponentNames(String service) { + return getComponents(service).stream().map(Component::getName).collect(toList()); + } + + /** + * Get whether the specified component in the service is enabled + * for auto start. + * + * @param serviceName - Service name. + * @param componentName - Component name. + * + * @return null if value is not specified; true or false if specified. + */ + @Override + public String getRecoveryEnabled(String serviceName, String componentName) { + Set<Map<String, String>> settingValue; + + if (setting == null) + return null; + + // If component name was specified in the list of "component_settings", + // determine if recovery_enabled is true or false and return it. + settingValue = setting.getSettingValue(Setting.SETTING_NAME_COMPONENT_SETTINGS); + for (Map<String, String> setting : settingValue) { + String name = setting.get(Setting.SETTING_NAME_NAME); + if (StringUtils.equals(name, componentName)) { + if (!StringUtils.isEmpty(setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED))) { + return setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED); + } + } + } + + // If the component name is not specified, look up its service.
+ settingValue = setting.getSettingValue(Setting.SETTING_NAME_SERVICE_SETTINGS); + for (Map<String, String> setting : settingValue) { + String name = setting.get(Setting.SETTING_NAME_NAME); + if (StringUtils.equals(name, serviceName)) { + if (!StringUtils.isEmpty(setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED))) { + return setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED); + } + } + } + + // If the service name is not specified, look up the cluster setting. + settingValue = setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS); + for (Map<String, String> setting : settingValue) { + if (!StringUtils.isEmpty(setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED))) { + return setting.get(Setting.SETTING_NAME_RECOVERY_ENABLED); + } + } + + return null; + } + + /** + * Get whether the specified service is enabled for credential store use. + * + * <pre>
+   *     {@code
+   *       {
+   *         "service_settings" : [
+   *         { "name" : "RANGER",
+   *           "recovery_enabled" : "true",
+   *           "credential_store_enabled" : "true"
+   *         },
+   *         { "name" : "HIVE",
+   *           "recovery_enabled" : "true",
+   *           "credential_store_enabled" : "false"
+   *         },
+   *         { "name" : "TEZ",
+   *           "recovery_enabled" : "false"
+   *         }
+   *       ]
+   *     }
+   *   }
+   * </pre>
+ * + * @param serviceName - Service name. + * + * @return null if value is not specified; true or false if specified. + */ + @Override + public String getCredentialStoreEnabled(String serviceName) { + if (setting == null) + return null; + + // Look up the service and return the credential_store_enabled value. + Set<Map<String, String>> settingValue = setting.getSettingValue(Setting.SETTING_NAME_SERVICE_SETTINGS); + for (Map<String, String> setting : settingValue) { + String name = setting.get(Setting.SETTING_NAME_NAME); + if (StringUtils.equals(name, serviceName)) { + if (!StringUtils.isEmpty(setting.get(Setting.SETTING_NAME_CREDENTIAL_STORE_ENABLED))) { + return setting.get(Setting.SETTING_NAME_CREDENTIAL_STORE_ENABLED); + } + break; + } + } + + return null; + } + + @Override + public boolean shouldSkipFailure() { + if (setting == null) { + return false; + } + Set<Map<String, String>> settingValue = setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS); + for (Map<String, String> setting : settingValue) { + if (setting.containsKey(Setting.SETTING_NAME_SKIP_FAILURE)) { + return setting.get(Setting.SETTING_NAME_SKIP_FAILURE).equalsIgnoreCase("true"); + } + } + return false; + } + + @Override public Collection<MpackInstance> getMpacks() { return mpacks; } + public StackDefinition getStack() { + return stack; + } + + /** + * Get host groups which contain a component. + * + * @param component component name + * + * @return collection of host groups which contain the specified component + */ @Override public Collection<HostGroup> getHostGroupsForComponent(String component) { Collection<HostGroup> resultGroups = new HashSet<>(); @@ -148,7 +316,24 @@ public Collection<HostGroup> getHostGroupsForComponent(String component) { return resultGroups; } + /** + * Get host groups which contain a component for the given service. + * + * @param service service name + * + * @return collection of host groups which contain a component of the specified service + */ @Override + public Collection<HostGroup> getHostGroupsForService(String service) { + Collection<HostGroup> resultGroups = new HashSet<>(); + for (HostGroup group : hostGroups.values()) { + if (group.getServices().contains(service)) { + resultGroups.add(group); + } + } + return resultGroups; + } + public BlueprintEntity toEntity() { BlueprintEntity entity = new BlueprintEntity(); entity.setBlueprintName(name); @@ -203,7 +388,7 @@ private Collection<MpackInstance> parseMpacks(BlueprintEntity blueprintEntity) t private Map<String, HostGroup> parseBlueprintHostGroups(BlueprintEntity entity) { Map<String, HostGroup> hostGroups = new HashMap<>(); for (HostGroupEntity hostGroupEntity : entity.getHostGroups()) { - HostGroupImpl hostGroup = new HostGroupImpl(hostGroupEntity); + HostGroupImpl hostGroup = new HostGroupImpl(hostGroupEntity, getName(), getStack()); // set the bp configuration as the host group config parent hostGroup.getConfiguration().setParentConfiguration(configuration); hostGroups.put(hostGroupEntity.getName(), hostGroup); @@ -220,6 +405,15 @@ private Configuration processConfiguration(Collection + private Setting processSetting(Collection<BlueprintSettingEntity> blueprintSetting) { + return blueprintSetting != null + ? new Setting(parseSetting(blueprintSetting)) + : null; + } + /** * Obtain configuration as a map of config type to corresponding properties.
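The getRecoveryEnabled() implementation added above resolves the flag with a three-level precedence: component_settings first, then service_settings, then the cluster-wide recovery_settings. A condensed sketch of that lookup order over plain collections (the Setting type and the SETTING_NAME_* constants are replaced by literal strings purely for illustration):

    import java.util.List;
    import java.util.Map;

    public class RecoverySettingSketch {

      // Returns the first non-null "recovery_enabled" found, mirroring the
      // component -> service -> cluster precedence used above.
      static String recoveryEnabled(Map<String, List<Map<String, String>>> settings,
                                    String service, String component) {
        for (Map<String, String> s : settings.getOrDefault("component_settings", List.of())) {
          if (component.equals(s.get("name")) && s.get("recovery_enabled") != null) {
            return s.get("recovery_enabled");
          }
        }
        for (Map<String, String> s : settings.getOrDefault("service_settings", List.of())) {
          if (service.equals(s.get("name")) && s.get("recovery_enabled") != null) {
            return s.get("recovery_enabled");
          }
        }
        for (Map<String, String> s : settings.getOrDefault("recovery_settings", List.of())) {
          if (s.get("recovery_enabled") != null) {
            return s.get("recovery_enabled");
          }
        }
        return null;
      }

      public static void main(String[] args) {
        Map<String, List<Map<String, String>>> settings = Map.of(
            "service_settings", List.of(Map.of("name", "HDFS", "recovery_enabled", "true")),
            "recovery_settings", List.of(Map.of("recovery_enabled", "false")));
        // Component-level entry is absent, so the service-level value wins.
        System.out.println(recoveryEnabled(settings, "HDFS", "DATANODE")); // true
      }
    }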
* @@ -242,16 +436,14 @@ private Map<String, Map<String, String>> parseConfigurations(Collection - private Map<String, Set<HashMap<String, String>>> parseSetting(Collection<BlueprintSettingEntity> blueprintSetting) { - if (blueprintSetting == null) { - return ImmutableMap.of(); - } + private Map<String, Set<Map<String, String>>> parseSetting(Collection<BlueprintSettingEntity> blueprintSetting) { - Map<String, Set<HashMap<String, String>>> properties = new HashMap<>(); + Map<String, Set<Map<String, String>>> properties = new HashMap<>(); Gson gson = new Gson(); for (BlueprintSettingEntity setting : blueprintSetting) { String settingName = setting.getSettingName(); - Set<HashMap<String, String>> settingProperties = gson.<Set<HashMap<String, String>>>fromJson(setting.getSettingData(), Set.class); + Set<Map<String, String>> settingProperties = gson.<Set<Map<String, String>>>fromJson( + setting.getSettingData(), Set.class); properties.put(settingName, settingProperties); } return properties; @@ -414,9 +606,9 @@ private void createBlueprintSettingEntities(BlueprintEntity blueprintEntity) { Setting blueprintSetting = getSetting(); if (blueprintSetting != null) { Map<String, BlueprintSettingEntity> settingEntityMap = new HashMap<>(); - for (Map.Entry<String, Set<HashMap<String, String>>> propEntry : blueprintSetting.getProperties().entrySet()) { + for (Map.Entry<String, Set<Map<String, String>>> propEntry : blueprintSetting.getProperties().entrySet()) { String settingName = propEntry.getKey(); - Set<HashMap<String, String>> properties = propEntry.getValue(); + Set<Map<String, String>> properties = propEntry.getValue(); BlueprintSettingEntity settingEntity = new BlueprintSettingEntity(); settingEntityMap.put(settingName, settingEntity); @@ -430,21 +622,45 @@ private void createBlueprintSettingEntities(BlueprintEntity blueprintEntity) { } /** - * Parse stack repo info stored in the blueprint_settings table + * A config type is valid if there are services related to it, except for cluster-env and global, which are always accepted. */ - private List<RepositorySetting> processRepoSettings() { - return setting != null ? setting.processRepoSettings() : Collections.emptyList(); + public boolean isValidConfigType(String configType) { + if (ConfigHelper.CLUSTER_ENV.equals(configType) || "global".equals(configType)) { + return true; + } + String service = getStack().getServiceForConfigType(configType); + return getServices().contains(service); } /** - * Add the Kerberos client to all host groups.
+ * Parse stack repo info stored in the blueprint_settings table */ - public boolean ensureKerberosClientIsPresent() { - boolean changed = false; - for (HostGroup group : getHostGroups().values()) { - changed |= group.addComponent(new Component("KERBEROS_CLIENT")); + private List<RepositorySetting> processRepoSettings() { + if (setting == null) { + return Collections.emptyList(); + } + + Set<Map<String, String>> repositorySettingsValue = setting.getSettingValue(Setting.SETTING_NAME_REPOSITORY_SETTINGS); + if (repositorySettingsValue == null) { + return Collections.emptyList(); } - return changed; + + return repositorySettingsValue.stream() + .map(this::parseRepositorySetting) + .collect(toList()); + } + + private RepositorySetting parseRepositorySetting(Map<String, String> setting) { + RepositorySetting result = new RepositorySetting(); + result.setOperatingSystem(setting.get(RepositorySetting.OPERATING_SYSTEM)); + result.setOverrideStrategy(setting.get(RepositorySetting.OVERRIDE_STRATEGY)); + result.setRepoId(setting.get(RepositorySetting.REPO_ID)); + result.setBaseUrl(setting.get(RepositorySetting.BASE_URL)); + return result; + } + + public List<RepositorySetting> getRepositorySettings() { + return repoSettings; + } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BlueprintValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java similarity index 51% rename from ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BlueprintValidator.java rename to ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java index 53d58785a8b..64e08ad1b5c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BlueprintValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidator.java @@ -16,15 +16,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.ambari.server.topology.validators; - -import org.apache.ambari.server.topology.Blueprint; +package org.apache.ambari.server.topology; /** - * Provides basic blueprint validation. + * Provides blueprint validation. */ public interface BlueprintValidator { + /** + * Validate blueprint topology. + * + * @param blueprint the blueprint to validate + * @throws InvalidTopologyException if the topology is invalid + */ + void validateTopology(Blueprint blueprint) throws InvalidTopologyException; - void validate(Blueprint blueprint) throws IllegalArgumentException; - + /** + * Validate that required properties are provided. + * This doesn't include password properties.
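processRepoSettings() and parseRepositorySetting() above tolerate a missing settings block and copy four keys per repository entry into a RepositorySetting bean. A hedged sketch of the same flow; the literal key names used here (operating_system, override_strategy, repo_id, base_url) are assumed to match the RepositorySetting constants:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class RepoSettingsSketch {

      // Simplified stand-in for org.apache.ambari.server.topology.RepositorySetting.
      static class Repo {
        final String os, strategy, id, url;
        Repo(Map<String, String> s) {
          os = s.get("operating_system");
          strategy = s.get("override_strategy");
          id = s.get("repo_id");
          url = s.get("base_url");
        }
        public String toString() { return id + "@" + os + " -> " + url; }
      }

      // Mirrors processRepoSettings() above: a missing repository_settings
      // section simply yields an empty list instead of failing.
      static List<Repo> process(Set<Map<String, String>> repositorySettings) {
        if (repositorySettings == null) {
          return Collections.emptyList();
        }
        return repositorySettings.stream().map(Repo::new).collect(Collectors.toList());
      }

      public static void main(String[] args) {
        Set<Map<String, String>> section = Set.of(Map.of(
            "operating_system", "redhat7",
            "override_strategy", "ALWAYS_APPLY",
            "repo_id", "HDP-2.6",
            "base_url", "http://example.com/hdp"));
        System.out.println(process(section)); // [HDP-2.6@redhat7 -> http://example.com/hdp]
        System.out.println(process(null));    // []
      }
    }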
+ * + * @param blueprint the blueprint to validate + * @throws InvalidTopologyException if required properties are not set in blueprint + * @throws GPLLicenseNotAcceptedException if the blueprint requires use of GPL software, but GPL license was not accepted + */ + void validateRequiredProperties(Blueprint blueprint) throws InvalidTopologyException, GPLLicenseNotAcceptedException; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java similarity index 58% rename from ambari-server/src/main/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidator.java rename to ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java index a25dcc4ce45..39cbbfaf360 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java @@ -16,52 +16,62 @@ * limitations under the License. */ -package org.apache.ambari.server.topology.validators; +package org.apache.ambari.server.topology; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.Map; +import java.util.Set; -import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor; +import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.state.AutoDeployInfo; import org.apache.ambari.server.state.DependencyConditionInfo; import org.apache.ambari.server.state.DependencyInfo; -import org.apache.ambari.server.topology.Blueprint; -import org.apache.ambari.server.topology.Cardinality; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.Component; -import org.apache.ambari.server.topology.HostGroup; -import org.apache.ambari.server.topology.InvalidTopologyException; +import org.apache.ambari.server.utils.SecretReference; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.inject.Inject; + /** - * Verifies that service dependencies and component cardinality requirements are satisfied. + * Default blueprint validator. 
*/ -public class DependencyAndCardinalityValidator implements TopologyValidator { +public class BlueprintValidatorImpl implements BlueprintValidator { + + private static final Logger LOGGER = LoggerFactory.getLogger(BlueprintValidatorImpl.class); + + public static final String LZO_CODEC_CLASS_PROPERTY_NAME = "io.compression.codec.lzo.class"; + public static final String CODEC_CLASSES_PROPERTY_NAME = "io.compression.codecs"; + public static final String LZO_CODEC_CLASS = "com.hadoop.compression.lzo.LzoCodec"; - private static final Logger LOGGER = LoggerFactory.getLogger(DependencyAndCardinalityValidator.class); + private final Configuration configuration; + + @Inject + public BlueprintValidatorImpl(Configuration configuration) { + this.configuration = configuration; + } @Override - public void validate(ClusterTopology topology) throws InvalidTopologyException { - Blueprint blueprint = topology.getBlueprint(); - LOGGER.info("Validating topology for blueprint: [{}]", topology.getBlueprintName()); + public void validateTopology(Blueprint blueprint) throws InvalidTopologyException { + LOGGER.info("Validating topology for blueprint: [{}]", blueprint.getName()); - StackDefinition stack = topology.getStack(); + StackDefinition stack = blueprint.getStack(); Collection hostGroups = blueprint.getHostGroups().values(); Map>> missingDependencies = new HashMap<>(); for (HostGroup group : hostGroups) { - Map> missingGroupDependencies = validateHostGroup(topology, blueprint, stack, group); + Map> missingGroupDependencies = validateHostGroup(blueprint, stack, group); if (!missingGroupDependencies.isEmpty()) { missingDependencies.put(group.getName(), missingGroupDependencies); } } Collection cardinalityFailures = new HashSet<>(); - Collection services = topology.getServices(); + Collection services = blueprint.getServices(); for (String service : services) { for (String component : stack.getComponents(service)) { @@ -71,7 +81,7 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { cardinalityFailures.addAll(verifyComponentInAllHostGroups(blueprint, new Component(component), autoDeploy)); } else { cardinalityFailures.addAll(verifyComponentCardinalityCount( - stack, topology, blueprint, new Component(component), cardinality, autoDeploy)); + stack, blueprint, new Component(component), cardinality, autoDeploy)); } } } @@ -81,6 +91,94 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { } } + @Override + public void validateRequiredProperties(Blueprint blueprint) throws InvalidTopologyException, GPLLicenseNotAcceptedException { + + // we don't want to include default stack properties so we can't just use hostGroup full properties + Map> clusterConfigurations = blueprint.getConfiguration().getProperties(); + + // we need to have real passwords, not references + if (clusterConfigurations != null) { + + // need to reject blueprints that have LZO enabled if the Ambari Server hasn't been configured for it + boolean gplEnabled = configuration.getGplLicenseAccepted(); + + StringBuilder errorMessage = new StringBuilder(); + boolean containsSecretReferences = false; + for (Map.Entry> configEntry : clusterConfigurations.entrySet()) { + String configType = configEntry.getKey(); + if (configEntry.getValue() != null) { + for (Map.Entry propertyEntry : configEntry.getValue().entrySet()) { + String propertyName = propertyEntry.getKey(); + String propertyValue = propertyEntry.getValue(); + if (propertyValue != null) { + if (!gplEnabled && 
configType.equals("core-site") + && (propertyName.equals(LZO_CODEC_CLASS_PROPERTY_NAME) || propertyName.equals(CODEC_CLASSES_PROPERTY_NAME)) + && propertyValue.contains(LZO_CODEC_CLASS)) { + throw new GPLLicenseNotAcceptedException("Your Ambari server has not been configured to download LZO GPL software. " + + "Please refer to documentation to configure Ambari before proceeding."); + } + if (SecretReference.isSecret(propertyValue)) { + errorMessage.append(" Config:" + configType + " Property:" + propertyName + "\n"); + containsSecretReferences = true; + } + } + } + } + } + if (containsSecretReferences) { + throw new InvalidTopologyException("Secret references are not allowed in blueprints, " + + "replace following properties with real passwords:\n" + errorMessage); + } + } + + + for (HostGroup hostGroup : blueprint.getHostGroups().values()) { + Collection processedServices = new HashSet<>(); + Map> allRequiredProperties = new HashMap<>(); + Map> operationalConfiguration = new HashMap<>(clusterConfigurations); + + operationalConfiguration.putAll(hostGroup.getConfiguration().getProperties()); + for (String component : hostGroup.getComponentNames()) { + //check that MYSQL_SERVER component is not available while hive is using existing db + if (component.equals("MYSQL_SERVER")) { + Map hiveEnvConfig = clusterConfigurations.get("hive-env"); + if (hiveEnvConfig != null && !hiveEnvConfig.isEmpty() && hiveEnvConfig.get("hive_database") != null + && hiveEnvConfig.get("hive_database").startsWith("Existing")) { + throw new InvalidTopologyException("Incorrect configuration: MYSQL_SERVER component is available but hive" + + " using existing db!"); + } + } + if (ClusterTopologyImpl.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) { + Map hadoopEnvConfig = clusterConfigurations.get("hadoop-env"); + if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) { + ArrayList hostGroupsForComponent = new ArrayList<>(blueprint.getHostGroupsForComponent(component)); + Set givenHostGroups = new HashSet<>(); + givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")); + givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")); + if(givenHostGroups.size() != hostGroupsForComponent.size()) { + throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent); + } + if(HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches()){ + for (HostGroup hostGroupForComponent : hostGroupsForComponent) { + Iterator itr = givenHostGroups.iterator(); + while(itr.hasNext()){ + if(itr.next().contains(hostGroupForComponent.getName())){ + itr.remove(); + } + } + } + } + + if(!givenHostGroups.isEmpty()){ + throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent); + } + } + } + } + } + } + /** * Verify that a component is included in all host groups. * For components that are auto-install enabled, will add component to topology if needed. 
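The NAMENODE branch of validateRequiredProperties() above cross-checks the dfs_ha_initial_namenode_active/standby values against the host groups that actually carry NAMENODE. A reduced sketch of just the matching step; the token pattern here is assumed to be the usual %HOSTGROUP::name% form (the real one is HostGroup.HOSTGROUP_REGEX):

    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;
    import java.util.regex.Pattern;

    public class NameNodeHaCheckSketch {

      // Assumed token format; the real pattern is HostGroup.HOSTGROUP_REGEX.
      static final Pattern HOSTGROUP = Pattern.compile("%HOSTGROUP::(\\S+?)%");

      public static void main(String[] args) {
        List<String> groupsWithNameNode = List.of("master_1", "master_2");
        Set<String> given = new HashSet<>(
            List.of("%HOSTGROUP::master_1%", "%HOSTGROUP::master_2%"));

        if (given.size() != groupsWithNameNode.size()) {
          throw new IllegalArgumentException("active/standby must map to distinct host groups");
        }
        // Remove every token that names a known NAMENODE host group ...
        for (String group : groupsWithNameNode) {
          Iterator<String> it = given.iterator();
          while (it.hasNext()) {
            String token = it.next();
            if (HOSTGROUP.matcher(token).matches() && token.contains(group)) {
              it.remove();
            }
          }
        }
        // ... anything left over points at a group without a NAMENODE.
        if (!given.isEmpty()) {
          throw new IllegalArgumentException("Unmatched NAMENODE HA host groups: " + given);
        }
        System.out.println("NAMENODE HA host group mapping looks consistent");
      }
    }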
@@ -108,7 +206,7 @@ private Collection verifyComponentInAllHostGroups(Blueprint blueprint, C return cardinalityFailures; } - private Map> validateHostGroup(ClusterTopology topology, Blueprint blueprint, StackDefinition stack, HostGroup group) { + private Map> validateHostGroup(Blueprint blueprint, StackDefinition stack, HostGroup group) { LOGGER.info("Validating hostgroup: {}", group.getName()); Map> missingDependencies = new HashMap<>(); @@ -118,11 +216,18 @@ private Map> validateHostGroup(ClusterTopolog for (DependencyInfo dependency : stack.getDependenciesForComponent(component.getName())) { LOGGER.debug("Processing dependency [{}] for component [{}]", dependency.getName(), component); + String conditionalService = stack.getConditionalServiceForDependency(dependency); + if (conditionalService != null && !blueprint.getServices().contains(conditionalService)) { + LOGGER.debug("Conditional service [{}] is missing from the blueprint, skipping dependency [{}]", + conditionalService, dependency.getName()); + continue; + } + // dependent components from the stack definitions are only added if related services are explicitly added to the blueprint! boolean isClientDependency = stack.getComponentInfo(dependency.getComponentName()).isClient(); - if (isClientDependency && !topology.getServices().contains(dependency.getServiceName())) { + if (isClientDependency && !blueprint.getServices().contains(dependency.getServiceName())) { LOGGER.debug("The service [{}] for component [{}] is missing from the blueprint [{}], skipping dependency", - dependency.getServiceName(), dependency.getComponentName(), topology.getBlueprintName()); + dependency.getServiceName(), dependency.getComponentName(), blueprint.getName()); continue; } @@ -135,7 +240,7 @@ private Map> validateHostGroup(ClusterTopolog if(dependency.hasDependencyConditions()) { boolean conditionsSatisfied = true; for (DependencyConditionInfo dependencyCondition : dependency.getDependencyConditions()) { - if (!dependencyCondition.isResolved(topology.getConfiguration().getFullProperties())) { + if (!dependencyCondition.isResolved(blueprint.getConfiguration().getFullProperties())) { conditionsSatisfied = false; break; } @@ -146,7 +251,7 @@ private Map> validateHostGroup(ClusterTopolog } if (dependencyScope.equals("cluster")) { Collection missingDependencyInfo = verifyComponentCardinalityCount( - stack, topology, blueprint, new Component(componentName), new Cardinality("1+"), autoDeployInfo); + stack, blueprint, new Component(componentName), new Cardinality("1+"), autoDeployInfo); resolved = missingDependencyInfo.isEmpty(); } else if (dependencyScope.equals("host")) { @@ -184,16 +289,15 @@ private Map> validateHostGroup(ClusterTopolog */ private Collection verifyComponentCardinalityCount( StackDefinition stack, - ClusterTopology topology, Blueprint blueprint, Component component, Cardinality cardinality, AutoDeployInfo autoDeploy ) { - Map> configProperties = topology.getConfiguration().getProperties(); + Map> configProperties = blueprint.getConfiguration().getProperties(); Collection cardinalityFailures = new HashSet<>(); //todo: don't hard code this HA logic here - if (BlueprintConfigurationProcessor.isNameNodeHAEnabled(configProperties) && + if (ClusterTopologyImpl.isNameNodeHAEnabled(configProperties) && (component.getName().equals("SECONDARY_NAMENODE"))) { // override the cardinality for this component in an HA deployment, // since the SECONDARY_NAMENODE should not be started in this scenario diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java index ae4fe6b6706..ee626bce84a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java @@ -18,10 +18,6 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.groupingBy; -import static java.util.stream.Collectors.mapping; -import static java.util.stream.Collectors.toSet; - import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -79,7 +75,8 @@ public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor) { this.ambariContext = ambariContext; this.clusterTopology = clusterTopology; - this.stack = clusterTopology.getStack(); + Blueprint blueprint = clusterTopology.getBlueprint(); + this.stack = blueprint.getStack(); // set initial configuration (not topology resolved) this.configurationProcessor = new BlueprintConfigurationProcessor(clusterTopology); this.stackAdvisorBlueprintProcessor = stackAdvisorBlueprintProcessor; @@ -93,9 +90,11 @@ public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology * Remove config-types from the given configuration if there is no any services related to them (except cluster-env and global). */ private void removeOrphanConfigTypes(Configuration configuration) { + Blueprint blueprint = clusterTopology.getBlueprint(); + Collection configTypes = configuration.getAllConfigTypes(); for (String configType : configTypes) { - if (!clusterTopology.isValidConfigType(configType)) { + if (!blueprint.isValidConfigType(configType)) { configuration.removeConfigType(configType); LOG.info("Removing config type '{}' as related service is not present in either Blueprint or cluster creation template.", configType); } @@ -165,24 +164,24 @@ private Set configureKerberos(Configuration clusterConfiguration, Map> stackDefaultProps = stackDefaults.getProperties(); // add clusterHostInfo containing components to hosts map, based on Topology, to use this one instead of // StageUtils.getClusterInfo() - Map componentHostsMap = createComponentHostMap(); + Map componentHostsMap = createComponentHostMap(blueprint); existingConfigurations.put("clusterHostInfo", componentHostsMap); try { // generate principals & keytabs for headless identities AmbariContext.getController().getKerberosHelper() .ensureHeadlessIdentities(cluster, existingConfigurations, - new HashSet<>(clusterTopology.getServices())); + new HashSet<>(blueprint.getServices())); // apply Kerberos specific configurations Map> updatedConfigs = AmbariContext.getController().getKerberosHelper() .getServiceConfigurationUpdates(cluster, existingConfigurations, - createServiceComponentMap(), null, null, true, false); + createServiceComponentMap(blueprint), null, null, true, false); // ****************************************************************************************** // Since Kerberos is being enabled, make sure the cluster-env/security_enabled property is @@ -199,7 +198,7 @@ private Set configureKerberos(Configuration clusterConfiguration, Map propertyMap = updatedConfigs.get(configType); Map clusterConfigProperties = existingConfigurations.get(configType); Map stackDefaultConfigProperties = stackDefaultProps.get(configType); @@ 
-229,12 +228,24 @@ private Set configureKerberos(Configuration clusterConfiguration, Map> createServiceComponentMap() { - return clusterTopology.getComponents() - .collect(groupingBy(ResolvedComponent::effectiveServiceName, - mapping(ResolvedComponent::componentName, toSet()))); + private Map> createServiceComponentMap(Blueprint blueprint) { + Map> serviceComponents = new HashMap<>(); + Collection services = blueprint.getServices(); + + if(services != null) { + for (String service : services) { + Collection components = blueprint.getComponentNames(service); + serviceComponents.put(service, + (components == null) + ? Collections.emptySet() + : new HashSet<>(blueprint.getComponentNames(service))); + } + } + + return serviceComponents; } /** @@ -267,17 +278,19 @@ private boolean propertyHasCustomValue(Map clusterConfigProperti return propertyHasCustomValue; } - private Map createComponentHostMap() { + private Map createComponentHostMap(Blueprint blueprint) { Map componentHostsMap = new HashMap<>(); - for (ResolvedComponent component : clusterTopology.getComponents().collect(toSet())) { - String componentName = component.componentName(); - Collection componentHost = clusterTopology.getHostAssignmentsForComponent(componentName); - // retrieve corresponding clusterInfoKey for component using StageUtils - String clusterInfoKey = StageUtils.getComponentToClusterInfoKeyMap().get(componentName); - if (clusterInfoKey == null) { - clusterInfoKey = componentName.toLowerCase() + "_hosts"; + for (String service : blueprint.getServices()) { + Collection components = blueprint.getComponentNames(service); + for (String component : components) { + Collection componentHost = clusterTopology.getHostAssignmentsForComponent(component); + // retrieve corresponding clusterInfoKey for component using StageUtils + String clusterInfoKey = StageUtils.getComponentToClusterInfoKeyMap().get(component); + if (clusterInfoKey == null) { + clusterInfoKey = component.toLowerCase() + "_hosts"; + } + componentHostsMap.put(clusterInfoKey, StringUtils.join(componentHost, ",")); } - componentHostsMap.put(clusterInfoKey, StringUtils.join(componentHost, ",")); } return componentHostsMap; } @@ -287,6 +300,7 @@ private Collection getRequiredHostgroupsForKerberosConfiguration() { try { Cluster cluster = getCluster(); + Blueprint blueprint = clusterTopology.getBlueprint(); Configuration clusterConfiguration = clusterTopology.getConfiguration(); Map> existingConfigurations = clusterConfiguration.getFullProperties(); @@ -295,7 +309,7 @@ private Collection getRequiredHostgroupsForKerberosConfiguration() { // apply Kerberos specific configurations Map> updatedConfigs = AmbariContext.getController().getKerberosHelper() .getServiceConfigurationUpdates(cluster, existingConfigurations, - createServiceComponentMap(), null, null, true, false); + createServiceComponentMap(blueprint), null, null, true, false); // retrieve hostgroup for component names extracted from variables like "{clusterHostInfo.(component_name) // _host}" @@ -338,9 +352,10 @@ public void setConfigurationsOnCluster(ClusterTopology clusterTopology, String t //todo: also handle setting of host group scoped configuration which is updated by config processor List configurationRequests = new LinkedList<>(); + Blueprint blueprint = clusterTopology.getBlueprint(); Configuration clusterConfiguration = clusterTopology.getConfiguration(); - for (String service : clusterTopology.getServices()) { + for (String service : blueprint.getServices()) { //todo: remove intermediate request type // 
one bp config request per service BlueprintServiceConfigRequest blueprintConfigRequest = new BlueprintServiceConfigRequest(service); diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java index 67ee0070bb5..69ccb617dae 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java @@ -20,15 +20,9 @@ import java.util.Collection; import java.util.Map; -import java.util.Set; -import java.util.stream.Stream; - -import javax.annotation.Nonnull; import org.apache.ambari.server.controller.RequestStatusResponse; import org.apache.ambari.server.controller.internal.ProvisionAction; -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.state.StackId; /** * Represents a full cluster topology including all instance information as well as the associated @@ -44,33 +38,18 @@ public interface ClusterTopology { Long getClusterId(); /** - * Get the blueprint associated with the cluster. - * - * @return associated blueprint - */ - Blueprint getBlueprint(); - - /** - * Get the name of the blueprint associated with the cluster. + * Set the id of the cluster. * - * @return associated blueprint's name + * @param clusterId cluster id */ - String getBlueprintName(); + void setClusterId(Long clusterId); /** - * Get the stack associated with the blueprint. - * For mpack-based installation this is a composite stack - * that provides a single unified view of all underlying mpacks, - * but does not have any identifier. + * Get the blueprint associated with the cluster. * - * @return associated stack - */ - StackDefinition getStack(); - - /** - * @return the set of stack (mpack) IDs associated with the cluster + * @return associated blueprint */ - Set<StackId> getStackIds(); + Blueprint getBlueprint(); /** * Get the cluster scoped configuration for the cluster. @@ -81,15 +60,6 @@ public interface ClusterTopology { */ Configuration getConfiguration(); - /** - * Get the Blueprint cluster scoped setting. - * The blueprint cluster scoped setting has the setting properties - * with the setting names associated with the blueprint. - * - * @return blueprint cluster scoped setting - */ - Setting getSetting(); - /** * Get host group information. * @@ -104,7 +74,6 @@ public interface ClusterTopology { * * @return collection of host group names which contain the specified component */ - @Deprecated // 1. component name is not enough, 2. only used for stack-specific checks/updates Collection<String> getHostGroupsForComponent(String component); /** @@ -125,37 +94,8 @@ public interface ClusterTopology { * * @return collection of hosts for the specified component; will not return null */ - @Deprecated Collection<String> getHostAssignmentsForComponent(String component); - /** - * Get all of the services represented in the blueprint. - * - * @return collection of all represented service names - */ - Collection<String> getServices(); - - /** - * Get all of the components represented in the blueprint. - * - * @return collection of all represented components - */ - Stream<ResolvedComponent> getComponents(); - - /** - * Get the components that are included in the specified host group.
- * - * @param hostGroup host group name - * @return stream of components for the service - */ - @Nonnull - Stream getComponentsInHostGroup(String hostGroup); - - /** - * A config type is valid if there are services related to except cluster-env and global. - */ - boolean isValidConfigType(String configType); - /** * Update the existing topology based on the provided topology request. * @@ -177,6 +117,20 @@ public interface ClusterTopology { */ void addHostToTopology(String hostGroupName, String host) throws InvalidTopologyException, NoSuchHostGroupException; + /** + * Determine if NameNode HA is enabled. + * + * @return true if NameNode HA is enabled; false otherwise + */ + boolean isNameNodeHAEnabled(); + + /** + * Determine if Yarn ResourceManager HA is enabled. + * + * @return true if Yarn ResourceManager HA is enabled; false otherwise + */ + boolean isYarnResourceManagerHAEnabled(); + /** * Determine if the cluster is kerberos enabled. * @@ -201,8 +155,16 @@ public interface ClusterTopology { */ RequestStatusResponse startHost(String hostName, boolean skipFailure); + void setConfigRecommendationStrategy(ConfigRecommendationStrategy strategy); + ConfigRecommendationStrategy getConfigRecommendationStrategy(); + /** + * Set request provision action : INSTALL vs INSTALL_AND_START + * @param provisionAction @ProvisionAction + */ + void setProvisionAction(ProvisionAction provisionAction); + ProvisionAction getProvisionAction(); Map getAdvisedConfigurations(); @@ -218,12 +180,4 @@ public interface ClusterTopology { String getDefaultPassword(); - /** - * Determine if the host group contains a master component. - * - * @return true if the host group contains a master component; false otherwise - */ - boolean containsMasterComponent(String hostGroup); - - Collection getHostGroups(); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java index 1ea1101d0fb..f50e60f428d 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java @@ -19,7 +19,6 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toSet; import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_AND_START; import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_ONLY; @@ -29,88 +28,55 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.stream.Stream; - -import javax.annotation.Nonnull; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.controller.RequestStatusResponse; -import org.apache.ambari.server.controller.internal.BaseClusterRequest; -import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor; import org.apache.ambari.server.controller.internal.ProvisionAction; -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.state.ConfigHelper; -import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.controller.internal.ProvisionClusterRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; - /** * Represents a cluster topology. 
* Topology includes the associated blueprint, cluster configuration and hostgroup -> host mapping. */ public class ClusterTopologyImpl implements ClusterTopology { - private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class); - - private final Set stackIds; - private final StackDefinition stack; private Long clusterId; - private final Blueprint blueprint; - private final Configuration configuration; - private final ConfigRecommendationStrategy configRecommendationStrategy; - private final ProvisionAction provisionAction; - private final Map advisedConfigurations = new HashMap<>(); + + //todo: currently topology is only associated with a single bp + //todo: this will need to change to allow usage of multiple bp's for the same cluster + //todo: for example: provision using bp1 and scale using bp2 + private Blueprint blueprint; + private Configuration configuration; + private ConfigRecommendationStrategy configRecommendationStrategy; + private ProvisionAction provisionAction = ProvisionAction.INSTALL_AND_START; + private Map advisedConfigurations = new HashMap<>(); private final Map hostGroupInfoMap = new HashMap<>(); private final AmbariContext ambariContext; - private final BlueprintBasedClusterProvisionRequest provisionRequest; private final String defaultPassword; - private final Map> resolvedComponents; - private final Setting setting; + private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class); + + + //todo: will need to convert all usages of hostgroup name to use fully qualified name (BP/HG) + //todo: for now, restrict scaling to the same BP public ClusterTopologyImpl(AmbariContext ambariContext, TopologyRequest topologyRequest) throws InvalidTopologyException { - this.ambariContext = ambariContext; this.clusterId = topologyRequest.getClusterId(); + // provision cluster currently requires that all hostgroups have the same BP, so it is ok to use the root-level BP here this.blueprint = topologyRequest.getBlueprint(); - this.setting = blueprint.getSetting(); this.configuration = topologyRequest.getConfiguration(); - configRecommendationStrategy = ConfigRecommendationStrategy.NEVER_APPLY; - provisionAction = topologyRequest instanceof BaseClusterRequest ? ((BaseClusterRequest) topologyRequest).getProvisionAction() : INSTALL_AND_START; // FIXME - - provisionRequest = null; - defaultPassword = null; - stackIds = ImmutableSet.copyOf( - Sets.union(topologyRequest.getStackIds(), topologyRequest.getBlueprint().getStackIds())); - stack = ambariContext.composeStacks(stackIds); - resolvedComponents = ImmutableMap.of(); + if (topologyRequest instanceof ProvisionClusterRequest) { + this.defaultPassword = ((ProvisionClusterRequest) topologyRequest).getDefaultPassword(); + } else { + this.defaultPassword = null; + } registerHostGroupInfo(topologyRequest.getHostGroupInfo()); - } - // FIXME 2. replayed request should simply be a provision or scale request - // FIXME 3.
do not create a ClusterTopologyImpl for scale request -- create for original provision request only - public ClusterTopologyImpl( - AmbariContext ambariContext, - BlueprintBasedClusterProvisionRequest request, - Map> resolvedComponents - ) throws InvalidTopologyException { + // todo extract validation to specialized service + validateTopology(); this.ambariContext = ambariContext; - this.blueprint = request.getBlueprint(); - this.configuration = request.getConfiguration(); - this.provisionRequest = request; - this.resolvedComponents = resolvedComponents; - configRecommendationStrategy = request.getConfigRecommendationStrategy(); - provisionAction = request.getProvisionAction(); - - defaultPassword = provisionRequest.getDefaultPassword(); - stackIds = request.getStackIds(); - stack = request.getStack(); - setting = request.getSetting(); - blueprint.getConfiguration().setParentConfiguration(stack.getConfiguration(getServices())); - registerHostGroupInfo(request.getHostGroupInfo()); } @Override @@ -123,6 +89,7 @@ public Long getClusterId() { return clusterId; } + @Override public void setClusterId(Long clusterId) { this.clusterId = clusterId; } @@ -132,52 +99,30 @@ public Blueprint getBlueprint() { return blueprint; } - @Override - public String getBlueprintName() { - return blueprint.getName(); - } - - @Override - public Set getStackIds() { - return stackIds; - } - - @Override - public StackDefinition getStack() { - return stack; - } - @Override public Configuration getConfiguration() { return configuration; } - @Override - public Setting getSetting() { - return setting; - } - @Override public Map getHostGroupInfo() { return hostGroupInfoMap; } - @Override - public Collection getHostGroups() { - return blueprint.getHostGroups().values(); - } - + //todo: do we want to return groups with no requested hosts? 
@Override public Collection getHostGroupsForComponent(String component) { - return resolvedComponents.entrySet().stream() - .filter(e -> e.getValue().stream().anyMatch(c -> component.equals(c.componentName()))) - .map(Map.Entry::getKey) - .collect(toSet()); + Collection resultGroups = new ArrayList<>(); + for (HostGroup group : getBlueprint().getHostGroups().values() ) { + if (group.getComponentNames().contains(component)) { + resultGroups.add(group.getName()); + } + } + return resultGroups; } @Override public String getHostGroupForHost(String hostname) { - // FIXME change to map lookup for (HostGroupInfo groupInfo : hostGroupInfoMap.values() ) { if (groupInfo.getHostNames().contains(hostname)) { // a host can only be associated with a single host group @@ -233,47 +178,36 @@ public Collection getHostAssignmentsForComponent(String component) { } @Override - public Collection getServices() { - return getComponents() - .map(ResolvedComponent::effectiveServiceName) - .collect(toSet()); - } - - @Override - public Stream getComponents() { - return resolvedComponents.values().stream() - .flatMap(Collection::stream); + public boolean isNameNodeHAEnabled() { + return isNameNodeHAEnabled(configuration.getFullProperties()); } - @Override @Nonnull - public Stream getComponentsInHostGroup(String hostGroup) { - return resolvedComponents.computeIfAbsent(hostGroup, __ -> ImmutableSet.of()).stream(); + public static boolean isNameNodeHAEnabled(Map> configurationProperties) { + return configurationProperties.containsKey("hdfs-site") && + (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") || + configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices")); } @Override - public boolean containsMasterComponent(String hostGroup) { - return resolvedComponents.getOrDefault(hostGroup, ImmutableSet.of()).stream() - .anyMatch(ResolvedComponent::masterComponent); + public boolean isYarnResourceManagerHAEnabled() { + return isYarnResourceManagerHAEnabled(configuration.getFullProperties()); } - @Override - public boolean isValidConfigType(String configType) { - if (ConfigHelper.CLUSTER_ENV.equals(configType) || "global".equals(configType)) { - return true; - } - try { - String service = getStack().getServiceForConfigType(configType); - return getServices().contains(service); - } catch (IllegalArgumentException e) { - return false; - } + /** + * Static convenience function to determine if Yarn ResourceManager HA is enabled + * @param configProperties configuration properties for this cluster + * @return true if Yarn ResourceManager HA is enabled + * false if Yarn ResourceManager HA is not enabled + */ + static boolean isYarnResourceManagerHAEnabled(Map> configProperties) { + return configProperties.containsKey("yarn-site") && configProperties.get("yarn-site").containsKey("yarn.resourcemanager.ha.enabled") + && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true"); } - // FIXME move out private void validateTopology() throws InvalidTopologyException { - if (BlueprintConfigurationProcessor.isNameNodeHAEnabled(getConfiguration().getFullProperties())) { + if(isNameNodeHAEnabled()){ Collection nnHosts = getHostAssignmentsForComponent("NAMENODE"); if (nnHosts.size() != 2) { throw new InvalidTopologyException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: " + @@ -281,8 +215,8 @@ private void validateTopology() } Map hadoopEnvConfig = configuration.getFullProperties().get("hadoop-env"); if(hadoopEnvConfig != null && 
!hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) { - if ((!BlueprintConfigurationProcessor.HOST_GROUP_PLACEHOLDER_PATTERN.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_active"))) - || (!BlueprintConfigurationProcessor.HOST_GROUP_PLACEHOLDER_PATTERN.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")))) { + if((!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_active"))) + || (!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")))){ throw new IllegalArgumentException("NAMENODE HA hosts mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected hosts are: " + nnHosts); } } @@ -337,6 +271,11 @@ public RequestStatusResponse startHost(String hostName, boolean skipFailure) { } } + @Override + public void setConfigRecommendationStrategy(ConfigRecommendationStrategy strategy) { + this.configRecommendationStrategy = strategy; + } + @Override public ConfigRecommendationStrategy getConfigRecommendationStrategy() { return this.configRecommendationStrategy; @@ -347,6 +286,11 @@ public ProvisionAction getProvisionAction() { return provisionAction; } + @Override + public void setProvisionAction(ProvisionAction provisionAction) { + this.provisionAction = provisionAction; + } + @Override public Map getAdvisedConfigurations() { return this.advisedConfigurations; diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Component.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Component.java index 3cc9c9931e0..a62d341d3cc 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Component.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Component.java @@ -28,8 +28,11 @@ public class Component { private final String name; + @Nullable private final String mpackInstance; + @Nullable private final String serviceInstance; + private final ProvisionAction provisionAction; @Deprecated @@ -37,7 +40,7 @@ public Component(String name) { this(name, null, null, null); } - public Component(String name, @Nullable String mpackInstance, @Nullable String serviceInstance, ProvisionAction provisionAction) { + public Component(String name, String mpackInstance, String serviceInstance, ProvisionAction provisionAction) { this.name = name; this.mpackInstance = mpackInstance; this.serviceInstance = serviceInstance; @@ -54,14 +57,14 @@ public String getName() { } /** - * @return the mpack associated with this component (can be {@code null} if component -> mpack mapping is unambiguous) + * @return the mpack associated with this component (can be {@code null} if component -> mpack mapping is unambiguous) */ public String getMpackInstance() { return mpackInstance; } /** - * @return the service instance this component belongs to. Can be {@code null} if component does not belong to a service + * @return the service instance this component belongs to.
Can be {@code null} if component does not belong to a service * instance (there is a single service of the component's service type) */ public String getServiceInstance() { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentResolver.java deleted file mode 100644 index b65af16e2f7..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentResolver.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology; - -import java.util.Map; -import java.util.Set; - -/** - * Resolves all incompletely specified host group components in the topology: - * finds stack and/or service type that each component is defined in. - */ -public interface ComponentResolver { - - /** - * @return the set resolved components for each host group (the map's keys are host group names) - * @throws IllegalArgumentException if the components cannot be unambiguously resolved - * (eg. if some component is not known, or if there are multiple component with the same name and - * the request does not specify which one to select) - */ - Map> resolveComponents(BlueprintBasedClusterProvisionRequest request); - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/DefaultStackFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/DefaultStackFactory.java deleted file mode 100644 index 2eb1bd64aa9..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/DefaultStackFactory.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.ambari.server.topology; - -import javax.inject.Inject; -import javax.inject.Provider; - -import org.apache.ambari.server.StackAccessException; -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.controller.internal.Stack; -import org.apache.ambari.server.state.StackId; - -/** - * Default implementation of StackFactory. - * - * Calls the Stack constructor to create the Stack instance. - * - */ -public class DefaultStackFactory implements StackFactory { - - @Inject - private Provider metaInfo; - - @Override - public Stack createStack(StackId stackId) throws StackAccessException { - return new Stack(stackId, metaInfo.get()); - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/GPLLicenseNotAcceptedException.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/GPLLicenseNotAcceptedException.java index fcc052e559e..b444d01345f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/GPLLicenseNotAcceptedException.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/GPLLicenseNotAcceptedException.java @@ -21,7 +21,7 @@ /** * Indicates a not permitted LZO usage. */ -public class GPLLicenseNotAcceptedException extends InvalidTopologyException { +public class GPLLicenseNotAcceptedException extends Exception { public GPLLicenseNotAcceptedException(String s) { super(s); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java index 9ece5a26e75..a329f42053f 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroup.java @@ -19,14 +19,20 @@ package org.apache.ambari.server.topology; import java.util.Collection; +import java.util.regex.Pattern; import org.apache.ambari.server.controller.internal.ProvisionAction; +import org.apache.ambari.server.controller.internal.StackDefinition; /** * Host Group representation. */ public interface HostGroup { + /** + * Compiled regex for hostgroup token. + */ + Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%"); /** * Get the name of the host group. * @@ -34,6 +40,21 @@ */ String getName(); + /** + * Get the name of the associated blueprint + * + * @return associated blueprint name + */ + String getBlueprintName(); + + /** + * Get the fully qualified host group name in the form of + * blueprintName:hostgroupName + * + * @return fully qualified host group name + */ + String getFullyQualifiedName(); + /** * Get all of the host group components. * @@ -41,6 +62,7 @@ public interface HostGroup { */ Collection getComponents(); + /** * Get all of the host group component names * @@ -62,11 +84,48 @@ public interface HostGroup { @Deprecated Collection getComponentNames(ProvisionAction provisionAction); + /** + * Get the names of components for the specified service which are associated with the host group. + * + * @param service service name + * + * @return set of component names + */ + @Deprecated + public Collection getComponentNames(String service); + + /** + * Get the host group components which belong to the specified service. + * + * @param service service instance name or service name. First, services are looked up + * by instance name.
If no appropriate service instance is found, services are looked + * up by type + * + * @return collection of component names for the specified service; will not return null + */ + Collection getComponents(String service); + + + /** * Add a component to the host group */ boolean addComponent(Component component); + /** + * Determine if the host group contains a master component. + * + * @return true if the host group contains a master component; false otherwise + */ + boolean containsMasterComponent(); + + /** + * Get all of the services associated with the host group components. + * + * @return collection of service names + */ + Collection getServices(); + /** * Get the configuration associated with the host group. * The host group configuration has the blueprint cluster scoped @@ -76,6 +135,13 @@ public interface HostGroup { */ Configuration getConfiguration(); + /** + * Get the stack associated with the host group. + * + * @return associated stack + */ + StackDefinition getStack(); + /** * Get the cardinality value that was specified for the host group. * This is simply meta-data for the stack that a deployer can use diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java index be607e7b0ba..0899d0bf111 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java @@ -20,8 +20,10 @@ package org.apache.ambari.server.topology; import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; @@ -29,9 +31,12 @@ import java.util.Set; import org.apache.ambari.server.controller.internal.ProvisionAction; +import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.orm.entities.HostGroupComponentEntity; import org.apache.ambari.server.orm.entities.HostGroupConfigEntity; import org.apache.ambari.server.orm.entities.HostGroupEntity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.gson.Gson; @@ -40,38 +45,63 @@ */ public class HostGroupImpl implements HostGroup { + private static final Logger LOG = LoggerFactory.getLogger(HostGroupImpl.class); + /** * host group name */ - private final String name; + private String name; + + /** + * blueprint name + */ + private String blueprintName; /** * components contained in the host group */ private final Set components = new LinkedHashSet<>(); + /** + * map of service to components for the host group + */ + // TODO: in blueprint 3.0 this should be per service instance + private Map> componentsForService = new HashMap<>(); + /** * configuration */ - private final Configuration configuration; - private final String cardinality; + private Configuration configuration = null; + + private boolean containsMasterComponent = false; // FIXME never set - public HostGroupImpl(HostGroupEntity entity) { + private StackDefinition stack; + + private String cardinality = "NOT SPECIFIED"; + + public HostGroupImpl(HostGroupEntity entity, String blueprintName, StackDefinition stack) { this.name = entity.getName(); this.cardinality = entity.getCardinality(); - configuration = parseConfigurations(entity); + this.blueprintName = blueprintName; + this.stack = stack; parseComponents(entity); + 
parseConfigurations(entity); } - public HostGroupImpl(String name, Collection components, Configuration configuration, String cardinality) { + public HostGroupImpl(String name, String bpName, StackDefinition stack, Collection components, Configuration configuration, String cardinality) { this.name = name; - this.configuration = configuration; - this.cardinality = (cardinality != null && !"null".equals(cardinality)) ? cardinality : "NOT SPECIFIED"; + this.blueprintName = bpName; + this.stack = stack; // process each component for (Component component: components) { addComponent(component); } + + this.configuration = configuration; + if (cardinality != null && ! cardinality.equals("null")) { + this.cardinality = cardinality; + } } @@ -80,6 +110,17 @@ public String getName() { return name; } + //todo: currently not qualifying host group name + @Override + public String getFullyQualifiedName() { + return String.format("%s:%s", blueprintName, getName()); + } + + //todo: currently not qualifying host group name + public static String formatAbsoluteName(String bpName, String hgName) { + return String.format("%s:%s", bpName, hgName); + } + @Override public Collection getComponents() { return components; @@ -103,15 +144,68 @@ public Collection getComponentNames(ProvisionAction provisionAction) { return setOfComponentNames; } + /** + * Get the services which are deployed to this host group. + * + * @return collection of services which have components in this host group + */ + @Override + public Collection getServices() { + return componentsForService.keySet(); + } + /** * Adds a component to the host group. * @param component the component to add */ @Override public boolean addComponent(Component component) { - return components.add(component); + if (components.add(component)) { + containsMasterComponent |= stack.isMasterComponent(component.getName()); + + String service = stack.getServiceForComponent(component.getName()); + if (service != null) { + componentsForService + .computeIfAbsent(service, __ -> new HashSet<>()) + .add(component); + } + + return true; + } + + return false; } + /** + * Get the components for the specified service which are associated with the host group. + * + * @param service service name + * + * @return set of components + */ + @Override + public Collection getComponents(String service) { + return componentsForService.containsKey(service) ? + new HashSet<>(componentsForService.get(service)) : + Collections.emptySet(); + } + + /** + * Get the names of components for the specified service which are associated with the host group. + * + * @param service service name + * + * @return set of component names + */ + @Override + @Deprecated + public Collection getComponentNames(String service) { + return componentsForService.containsKey(service) ? + componentsForService.get(service).stream().map(Component::getName).collect(toSet()) : + Collections.emptySet(); + } + + /** * Get this host groups configuration. * @@ -122,6 +216,26 @@ public Configuration getConfiguration() { return configuration; } + /** + * Get the associated blueprint name. 
+ * + * @return associated blueprint name + */ + @Override + public String getBlueprintName() { + return blueprintName; + } + + @Override + public boolean containsMasterComponent() { + return containsMasterComponent; + } + + @Override + public StackDefinition getStack() { + return stack; + } + @Override public String getCardinality() { return cardinality; @@ -145,12 +259,16 @@ private void parseComponents(HostGroupEntity entity) { * Parse host group configurations. */ //todo: use ConfigurationFactory - private Configuration parseConfigurations(HostGroupEntity entity) { + private void parseConfigurations(HostGroupEntity entity) { Map> config = new HashMap<>(); Gson jsonSerializer = new Gson(); for (HostGroupConfigEntity configEntity : entity.getConfigurations()) { String type = configEntity.getType(); - Map typeProperties = config.computeIfAbsent(type, k -> new HashMap<>()); + Map typeProperties = config.get(type); + if (typeProperties == null) { + typeProperties = new HashMap<>(); + config.put(type, typeProperties); + } Map propertyMap = jsonSerializer.>fromJson( configEntity.getConfigData(), Map.class); if (propertyMap != null) { @@ -159,10 +277,9 @@ private Configuration parseConfigurations(HostGroupEntity entity) { } //todo: parse attributes Map>> attributes = new HashMap<>(); - return new Configuration(config, attributes); + configuration = new Configuration(config, attributes); } - @Override public String toString(){ return name; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java index a8ecc12d2cf..3a8acc82162 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java @@ -98,7 +98,7 @@ public HostRequest(long requestId, long id, long clusterId, String hostname, Str this.hostGroup = hostGroup; hostgroupName = hostGroup.getName(); this.predicate = predicate; - containsMaster = topology.containsMasterComponent(hostgroupName); + containsMaster = hostGroup.containsMasterComponent(); this.topology = topology; this.skipFailure = skipFailure; createTasks(this.skipFailure); @@ -121,14 +121,14 @@ public HostRequest(long requestId, long id, String predicate, this.requestId = requestId; this.id = id; clusterId = topology.getClusterId(); - blueprint = topology.getBlueprintName(); + blueprint = topology.getBlueprint().getName(); hostgroupName = entity.getTopologyHostGroupEntity().getName(); hostGroup = topology.getBlueprint().getHostGroup(hostgroupName); hostname = entity.getHostName(); setStatus(entity.getStatus()); statusMessage = entity.getStatusMessage(); this.predicate = toPredicate(predicate); - containsMaster = topology.containsMasterComponent(hostgroupName); + containsMaster = hostGroup.containsMasterComponent(); this.topology = topology; this.skipFailure = skipFailure; @@ -257,7 +257,7 @@ private void createTasks(boolean skipFailure) { "PENDING HOST ASSIGNMENT : HOSTGROUP=" + getHostgroupName(); AmbariContext context = topology.getAmbariContext(); - StackDefinition stack = topology.getStack(); + StackDefinition stack = hostGroup.getStack(); // Skip INSTALL task in case server component is marked as START_ONLY, or the cluster provision_action is // START_ONLY, unless component is marked with INSTALL_ONLY or INSTALL_AND_START. 
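Note for reviewers: the ClusterTopologyImpl.validateTopology hunk above accepts a dfs_ha_initial_namenode_active/standby value when it is either a literal NAMENODE host or a %HOSTGROUP::name% placeholder matched by the restored HostGroup.HOSTGROUP_REGEX constant. Below is a minimal, self-contained sketch of that acceptance rule; the class name, helper method, and host names are illustrative assumptions, not part of this patch.

import java.util.Arrays;
import java.util.Collection;
import java.util.regex.Pattern;

public class HostGroupTokenSketch {
  // Same pattern as the HostGroup.HOSTGROUP_REGEX constant restored in this diff
  private static final Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%");

  // Mirrors the validateTopology rule: a value passes if it is a host group
  // placeholder or one of the hosts actually mapped to the NAMENODE component
  static boolean isAcceptableNameNodeRef(String value, Collection<String> nnHosts) {
    return HOSTGROUP_REGEX.matcher(value).matches() || nnHosts.contains(value);
  }

  public static void main(String[] args) {
    Collection<String> nnHosts = Arrays.asList("nn1.example.com", "nn2.example.com");
    System.out.println(isAcceptableNameNodeRef("%HOSTGROUP::master_1%", nnHosts)); // true
    System.out.println(isAcceptableNameNodeRef("nn1.example.com", nnHosts));       // true
    System.out.println(isAcceptableNameNodeRef("db.example.com", nnHosts));        // false
  }
}

Since matches() anchors the whole string, a mistyped placeholder (for example a missing trailing %) fails the regex and falls through to the literal host check, which is what lets validateTopology reject it with the "mapped incorrectly" error.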
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java index 39c711a426f..b9bbe2cf12c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java @@ -444,7 +444,7 @@ public Optional getFailureReason() { private void createHostRequests(TopologyRequest request, ClusterTopology topology) { Map hostGroupInfoMap = request.getHostGroupInfo(); Blueprint blueprint = topology.getBlueprint(); - boolean skipFailure = topology.getSetting().shouldSkipFailure(); + boolean skipFailure = topology.getBlueprint().shouldSkipFailure(); for (HostGroupInfo hostGroupInfo : hostGroupInfoMap.values()) { String groupName = hostGroupInfo.getHostGroupName(); int hostCardinality = hostGroupInfo.getRequestedHostCount(); @@ -455,14 +455,14 @@ private void createHostRequests(TopologyRequest request, ClusterTopology topolog // host names are specified String hostname = hostnames.get(i); HostRequest hostRequest = new HostRequest(getRequestId(), hostIdCounter.getAndIncrement(), getClusterId(), - hostname, topology.getBlueprintName(), blueprint.getHostGroup(groupName), null, topology, skipFailure); + hostname, blueprint.getName(), blueprint.getHostGroup(groupName), null, topology, skipFailure); synchronized (requestsWithReservedHosts) { requestsWithReservedHosts.put(hostname, hostRequest); } } else { // host count is specified HostRequest hostRequest = new HostRequest(getRequestId(), hostIdCounter.getAndIncrement(), getClusterId(), - null, topology.getBlueprintName(), blueprint.getHostGroup(groupName), hostGroupInfo.getPredicate(), topology, skipFailure); + null, blueprint.getName(), blueprint.getHostGroup(groupName), hostGroupInfo.getPredicate(), topology, skipFailure); outstandingHostRequests.add(hostRequest); } } @@ -495,7 +495,7 @@ private void createHostRequests(ClusterTopology topology, } } - boolean skipFailure = topology.getSetting().shouldSkipFailure(); + boolean skipFailure = topology.getBlueprint().shouldSkipFailure(); for (TopologyHostRequestEntity hostRequestEntity : requestEntity.getTopologyHostRequestEntities()) { Long hostRequestId = hostRequestEntity.getId(); synchronized (hostIdCounter) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/MpackInstance.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/MpackInstance.java index eeb45b776eb..6b57b0020e7 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/MpackInstance.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/MpackInstance.java @@ -26,7 +26,6 @@ import org.apache.ambari.server.orm.entities.BlueprintMpackInstanceEntity; import org.apache.ambari.server.orm.entities.BlueprintServiceConfigEntity; import org.apache.ambari.server.orm.entities.BlueprintServiceEntity; -import org.apache.ambari.server.state.StackId; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; @@ -72,8 +71,8 @@ public void setMpackVersion(String mpackVersion) { } @JsonIgnore - public StackId getStackId() { - return new StackId(getMpackName(), getMpackVersion()); + public String getMpackNameAndVersion() { + return mpackName + "-" + mpackVersion; } @JsonIgnore diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java 
b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java index ae70db467de..bb1f85234d7 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java @@ -23,9 +23,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import javax.inject.Inject; import javax.inject.Singleton; import org.apache.ambari.server.AmbariException; @@ -33,7 +31,6 @@ import org.apache.ambari.server.actionmanager.HostRoleStatus; import org.apache.ambari.server.api.predicate.InvalidQueryException; import org.apache.ambari.server.controller.internal.BaseClusterRequest; -import org.apache.ambari.server.controller.internal.ProvisionAction; import org.apache.ambari.server.orm.dao.HostDAO; import org.apache.ambari.server.orm.dao.HostRoleCommandDAO; import org.apache.ambari.server.orm.dao.TopologyHostGroupDAO; @@ -52,12 +49,12 @@ import org.apache.ambari.server.orm.entities.TopologyRequestEntity; import org.apache.ambari.server.stack.NoSuchStackException; import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.tasks.TopologyTask; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.gson.Gson; +import com.google.inject.Inject; import com.google.inject.persist.Transactional; /** @@ -93,6 +90,9 @@ public class PersistedStateImpl implements PersistedState { @Inject private HostRoleCommandDAO hostRoleCommandDAO; + @Inject + private HostRoleCommandDAO physicalTaskDAO; + @Inject private BlueprintFactory blueprintFactory; @@ -215,6 +215,9 @@ public Map> getAllRequests() { if (clusterTopology == null) { try { clusterTopology = new ClusterTopologyImpl(ambariContext, replayedRequest); + if (entity.getProvisionAction() != null) { + clusterTopology.setProvisionAction(entity.getProvisionAction()); + } topologyRequests.put(replayedRequest.getClusterId(), clusterTopology); allRequests.put(clusterTopology, new ArrayList<>()); } catch (InvalidTopologyException e) { @@ -254,8 +257,6 @@ public Map> getAllRequests() { private TopologyRequestEntity toEntity(BaseClusterRequest request) { TopologyRequestEntity entity = new TopologyRequestEntity(); - entity.setRawRequestBody(request.getRawRequestBody()); - //todo: this isn't set for a scaling operation because we had intended to allow multiple //todo: bp's to be used to scale a cluster although this isn't currently supported by //todo: new topology infrastructure @@ -331,7 +332,7 @@ private TopologyHostRequestEntity toEntity(HostRequest request, TopologyLogicalR logicalTaskEntity.setTopologyHostTaskEntity(topologyTaskEntity); Long physicalId = request.getPhysicalTaskId(logicalTaskId); if (physicalId != null) { - logicalTaskEntity.setHostRoleCommandEntity(hostRoleCommandDAO.findByPK(physicalId)); + logicalTaskEntity.setHostRoleCommandEntity(physicalTaskDAO.findByPK(physicalId)); } logicalTaskEntity.setTopologyHostTaskEntity(topologyTaskEntity); } @@ -391,16 +392,11 @@ private static class ReplayedTopologyRequest implements TopologyRequest { private final Blueprint blueprint; private final Configuration configuration; private final Map hostGroupInfoMap = new HashMap<>(); - private final ProvisionAction provisionAction; - private final Set stackIds; public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory blueprintFactory) { clusterId = entity.getClusterId(); type = 
Type.valueOf(entity.getAction()); description = entity.getDescription(); - provisionAction = entity.getProvisionAction(); - - stackIds = TopologyRequestUtil.getStackIdsFromRequest(entity.getRawRequestBody()); try { blueprint = blueprintFactory.getBlueprint(entity.getBlueprintName()); @@ -413,11 +409,6 @@ public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory bl parseHostGroupInfo(entity); } - @Override - public Set getStackIds() { - return stackIds; - } - @Override public Long getClusterId() { return clusterId; @@ -460,10 +451,6 @@ private Configuration createConfiguration(String propString, String attributeStr return new Configuration(properties, attributes); } - public ProvisionAction getProvisionAction() { - return provisionAction; - } - private void parseHostGroupInfo(TopologyRequestEntity entity) { for (TopologyHostGroupEntity hostGroupEntity : entity.getTopologyHostGroupEntities()) { for (TopologyHostInfoEntity hostInfoEntity : hostGroupEntity.getTopologyHostInfoEntities()) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ProvisionRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ProvisionRequest.java deleted file mode 100644 index 915dc142097..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ProvisionRequest.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology; - -import java.util.Collection; -import java.util.Set; - -import org.apache.ambari.server.controller.internal.ProvisionAction; -import org.apache.ambari.server.state.StackId; - -/** - * Request for provisioning the cluster. This interface is extracted - * from ProvisionClusterRequest so that we can have a unified view of the - * blueprint and the request in BlueprintBasedClusterProvisionRequest. 
- */ -public interface ProvisionRequest extends TopologyRequest { - - ConfigRecommendationStrategy getConfigRecommendationStrategy(); - ProvisionAction getProvisionAction(); - String getDefaultPassword(); - Set getStackIds(); - Collection getMpacks(); - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositorySetting.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositorySetting.java index 3e7571c84d0..35e370e83d8 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositorySetting.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositorySetting.java @@ -18,8 +18,6 @@ package org.apache.ambari.server.topology; -import java.util.Map; - public class RepositorySetting { /** * Settings for each repo setting sections @@ -36,15 +34,6 @@ public class RepositorySetting { private String repoId; private String baseUrl; - static RepositorySetting fromMap(Map setting) { - RepositorySetting result = new RepositorySetting(); - result.setOperatingSystem(setting.get(OPERATING_SYSTEM)); - result.setOverrideStrategy(setting.get(OVERRIDE_STRATEGY)); - result.setRepoId(setting.get(REPO_ID)); - result.setBaseUrl(setting.get(BASE_URL)); - return result; - } - /** * When specified under the "settings" section, it allows Ambari to overwrite existing repos stored * in the metainfo table in the Ambari server database. @@ -117,10 +106,11 @@ public void setBaseUrl(String baseUrl) { } public String toString(){ - return - OVERRIDE_STRATEGY + ": " + overrideStrategy + - OPERATING_SYSTEM + ": " + operatingSystem + - REPO_ID + ": " + repoId + - BASE_URL + ": " + baseUrl; + StringBuilder strBldr = new StringBuilder(); + strBldr.append(OVERRIDE_STRATEGY);strBldr.append(": ");strBldr.append(overrideStrategy); + strBldr.append(OPERATING_SYSTEM);strBldr.append(": ");strBldr.append(operatingSystem); + strBldr.append(REPO_ID);strBldr.append(": ");strBldr.append(repoId); + strBldr.append(BASE_URL);strBldr.append(": ");strBldr.append(baseUrl); + return strBldr.toString(); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ResolvedComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ResolvedComponent.java deleted file mode 100644 index 85e524bcefe..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ResolvedComponent.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.topology; - -import java.util.Optional; - -import org.apache.ambari.server.state.StackId; -import org.inferred.freebuilder.FreeBuilder; - -/** - * I provide additional information for a component specified in the blueprint, - * based on values resolved from the stack and sensible defaults. - */ -@FreeBuilder -public interface ResolvedComponent { - - StackId stackId(); - Optional serviceGroupName(); - String serviceType(); - Optional serviceName(); - String componentName(); - boolean masterComponent(); - - /** - * @return the component as specified in the blueprint - */ - Component component(); - - /** - * @return service group name if it set, otherwise defaults to the stack name - */ - default String effectiveServiceGroupName() { - return serviceGroupName().orElseGet(() -> stackId().getStackName()); - } - - /** - * @return service name if it set, otherwise defaults to the service type (eg. ZOOKEEPER) - */ - default String effectiveServiceName() { - return serviceName().orElseGet(this::serviceType); - } - - /** - * Starts building a {@code ResolvedComponent} for the given component. - */ - static Builder builder(Component component) { - return new Builder() - .component(component) - .componentName(component.getName()) - .serviceName(Optional.ofNullable(component.getServiceInstance())) - .serviceGroupName(Optional.ofNullable(component.getMpackInstance())); - } - - class Builder extends ResolvedComponent_Builder { - protected Builder() { - masterComponent(false); - } - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java index cf134735605..904c784dac3 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java @@ -18,31 +18,32 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toList; - -import java.util.List; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; -import java.util.Objects; import java.util.Set; -import org.apache.commons.lang.StringUtils; - -import com.google.common.collect.ImmutableSet; - public class Setting { /** * Settings for this configuration instance */ - private final Map>> properties; + private Map>> properties; + + public static final String SETTING_NAME_RECOVERY_SETTINGS = "recovery_settings"; + + public static final String SETTING_NAME_SERVICE_SETTINGS = "service_settings"; + + public static final String SETTING_NAME_COMPONENT_SETTINGS = "component_settings"; + + public static final String SETTING_NAME_DEPLOYMENT_SETTINGS = "deployment_settings"; + + public static final String SETTING_NAME_RECOVERY_ENABLED = "recovery_enabled"; - static final String SETTING_NAME_RECOVERY_SETTINGS = "recovery_settings"; - static final String SETTING_NAME_SERVICE_SETTINGS = "service_settings"; - static final String SETTING_NAME_COMPONENT_SETTINGS = "component_settings"; - static final String SETTING_NAME_DEPLOYMENT_SETTINGS = "deployment_settings"; - static final String SETTING_NAME_RECOVERY_ENABLED = "recovery_enabled"; public static final String SETTING_NAME_SKIP_FAILURE = "skip_failure"; - static final String SETTING_NAME_NAME = "name"; - static final String SETTING_NAME_REPOSITORY_SETTINGS = "repository_settings"; + + public static final String SETTING_NAME_NAME = "name"; + + public static final String SETTING_NAME_REPOSITORY_SETTINGS = 
"repository_settings"; /** * When specified under the "service_settings" section, it indicates whether credential store @@ -63,14 +64,15 @@ public class Setting { * } * */ - private static final String SETTING_NAME_CREDENTIAL_STORE_ENABLED = "credential_store_enabled"; + public static final String SETTING_NAME_CREDENTIAL_STORE_ENABLED = "credential_store_enabled"; /** * Settings. * * @param properties setting name-->Set(property name-->property value) */ - public Setting(Map>> properties) { + public Setting(Map>> properties) { + this.properties = properties; } @@ -79,7 +81,7 @@ public Setting(Map>> properties) { * * @return map of properties for this settings instance keyed by setting name. */ - public Map>> getProperties() { + public Map>> getProperties() { return properties; } @@ -89,101 +91,11 @@ public Map>> getProperties() { * @param settingName * @return Set of Map of properties. */ - public Set> getSettingValue(String settingName) { - return properties.getOrDefault(settingName, ImmutableSet.of()); - } - - /** - * Get whether the specified service is enabled for credential store use. - * - *
-   *     {@code
-   *       {
-   *         "service_settings" : [
-   *         { "name" : "RANGER",
-   *           "recovery_enabled" : "true",
-   *           "credential_store_enabled" : "true"
-   *         },
-   *         { "name" : "HIVE",
-   *           "recovery_enabled" : "true",
-   *           "credential_store_enabled" : "false"
-   *         },
-   *         { "name" : "TEZ",
-   *           "recovery_enabled" : "false"
-   *         }
-   *       ]
-   *     }
-   *   }
-   * 
- * - * @param serviceName - Service name. - * - * @return null if value is not specified; true or false if specified. - */ - String getCredentialStoreEnabled(String serviceName) { - return getStringFromNamedMap(SETTING_NAME_SERVICE_SETTINGS, serviceName, SETTING_NAME_CREDENTIAL_STORE_ENABLED); - } - - private String getStringFromNamedMap(String outerKey, String mapName, String innerKey) { - Set> maps = getSettingValue(outerKey); - for (Map map : maps) { - String name = map.get(SETTING_NAME_NAME); - if (Objects.equals(name, mapName)) { - String value = map.get(innerKey); - if (!StringUtils.isEmpty(value)) { - return value; - } - break; - } - } - return null; - } - - private String getString(String outerKey, String innerKey) { - Set> maps = getSettingValue(outerKey); - for (Map map : maps) { - if (map.containsKey(innerKey)) { - return map.get(innerKey); - } - } - return null; - } - - public boolean shouldSkipFailure() { - return Boolean.parseBoolean(getString(SETTING_NAME_DEPLOYMENT_SETTINGS, SETTING_NAME_SKIP_FAILURE)); - } - - List processRepoSettings() { - Set> repositorySettingsValue = getSettingValue(SETTING_NAME_REPOSITORY_SETTINGS); - return repositorySettingsValue.stream() - .map(RepositorySetting::fromMap) - .collect(toList()); - } - - /** - * Get whether the specified component in the service is enabled - * for auto start. - * - * @param serviceName - Service name. - * @param componentName - Component name. - * - * @return null if value is not specified; true or false if specified. - */ - String getRecoveryEnabled(String serviceName, String componentName) { - Set> maps; - - // If component name was specified in the list of "component_settings", - // determine if recovery_enabled is true or false and return it. - String result = getStringFromNamedMap(SETTING_NAME_COMPONENT_SETTINGS, componentName, SETTING_NAME_RECOVERY_ENABLED); - // If component name is not specified, look up it's service. - if (result == null) { - result = getStringFromNamedMap(SETTING_NAME_SERVICE_SETTINGS, serviceName, SETTING_NAME_RECOVERY_ENABLED); - } - // If service name is not specified, look up the cluster setting. 
- if (result == null) { - result = getString(SETTING_NAME_RECOVERY_SETTINGS, SETTING_NAME_RECOVERY_ENABLED); + public Set> getSettingValue(String settingName) { + if (properties.containsKey(settingName)) { + return properties.get(settingName); } - return result; + return Collections.emptySet(); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/SettingFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/SettingFactory.java index ff002821179..a01f198ab1a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/SettingFactory.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/SettingFactory.java @@ -64,20 +64,23 @@ public class SettingFactory { /** * Attempts to build the list of settings in the following format: * setting_name1-->[propertyName1-->propertyValue1, propertyName2-->propertyValue2] + * @param blueprintSetting collection of raw setting maps from the blueprint + * @return the assembled Setting instance */ public static Setting getSetting(Collection> blueprintSetting) { - Map>> properties = new HashMap<>(); + Map>> properties = new HashMap<>(); + Setting setting = new Setting(properties); if (blueprintSetting != null) { for (Map settingMap : blueprintSetting) { for (Map.Entry entry : settingMap.entrySet()) { final String[] propertyNames = entry.getKey().split("/"); - Set> settingValue; + Set> settingValue; if (entry.getValue() instanceof Set) { - settingValue = (Set>) entry.getValue(); + settingValue = (HashSet>)entry.getValue(); } else if (propertyNames.length > 1){ - Map property = new HashMap<>(); + HashMap property = new HashMap<>(); property.put(propertyNames[1], String.valueOf(entry.getValue())); settingValue = properties.get(propertyNames[0]); if (settingValue == null) { @@ -93,6 +96,6 @@ else if (propertyNames.length > 1){ } } - return new Setting(properties); + return setting; } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/StackComponentResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/StackComponentResolver.java deleted file mode 100644 index 9d524d027c0..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/StackComponentResolver.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.ambari.server.topology; - -import static java.util.stream.Collectors.toSet; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Stream; - -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.state.StackId; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Joiner; -import com.google.common.base.Strings; - -public class StackComponentResolver implements ComponentResolver { - - private static final Logger LOG = LoggerFactory.getLogger(StackComponentResolver.class); - - @Override - public Map> resolveComponents(BlueprintBasedClusterProvisionRequest request) { - Map uniqueServices = request.getUniqueServices(); - Map> mpackServices = request.getServicesByMpack(); - - Map> result = new HashMap<>(); - List problems = new LinkedList<>(); - - StackDefinition stack = request.getStack(); - for (HostGroup hg : request.getHostGroups().values()) { - result.put(hg.getName(), new HashSet<>()); - - for (Component comp : hg.getComponents()) { - Stream> servicesForComponent = stack.getServicesForComponent(comp.getName()); - servicesForComponent = filterByMpackName(comp, servicesForComponent); - servicesForComponent = filterByServiceName(comp, servicesForComponent, mpackServices, uniqueServices); - - Set> serviceMatches = servicesForComponent.collect(toSet()); - - if (serviceMatches.size() != 1) { - String msg = formatResolutionProblemMessage(hg, comp, serviceMatches); - LOG.warn("Component resolution failure:" + msg); - problems.add(msg); - } else { - Pair stackService = serviceMatches.iterator().next(); - StackId stackId = stackService.getLeft(); - String serviceType = stackService.getRight(); - - ResolvedComponent resolved = ResolvedComponent.builder(comp) - .stackId(stackId) - .serviceType(serviceType) - .build(); - - LOG.debug("Component resolved: " + resolved); - result.get(hg.getName()).add(resolved); - } - } - } - - if (!problems.isEmpty()) { - throw new IllegalArgumentException("Component resolution failure:\n" + Joiner.on("\n").join(problems)); - } - - return result; - } - - private static String formatResolutionProblemMessage(HostGroup hg, Component comp, Set> serviceMatches) { - boolean multipleMatches = !serviceMatches.isEmpty(); - String problem = multipleMatches ? "Multiple services" : "No service"; - - StringBuilder sb = new StringBuilder(problem) - .append(" found for component ").append(comp.getName()) - .append(" in host group " ).append(hg.getName()); - - if (!Strings.isNullOrEmpty(comp.getMpackInstance())) { - sb.append(" mpack: ").append(comp.getMpackInstance()); - } - if (!Strings.isNullOrEmpty(comp.getServiceInstance())) { - sb.append(" service: ").append(comp.getServiceInstance()); - } - if (multipleMatches) { - sb.append(": ").append(serviceMatches); - } - - return sb.toString(); - } - - // if component references a specific service instance, filter the stream by the type of that service - private static Stream> filterByServiceName(Component comp, Stream> stream, - Map> mpackServices, Map uniqueServices - ) { - if (!Strings.isNullOrEmpty(comp.getServiceInstance())) { - String mpackName = comp.getMpackInstance(); - Map services = !Strings.isNullOrEmpty(mpackName) - ? 
mpackServices.get(mpackName) - : uniqueServices; - - ServiceInstance service = services.get(comp.getServiceInstance()); - if (service != null) { - String serviceType = service.getType(); - - return stream.filter(pair -> pair.getRight().equals(serviceType)); - } - } - - return stream; - } - - // if component references a specific mpack instance, filter the stream by the name of that mpack - private static Stream> filterByMpackName(Component comp, Stream> stream) { - if (!Strings.isNullOrEmpty(comp.getMpackInstance())) { - return stream.filter(pair -> pair.getLeft().getStackName().equals(comp.getMpackInstance())); - } - - return stream; - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/StackFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/StackFactory.java deleted file mode 100644 index d7b07401603..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/StackFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology; - -import org.apache.ambari.server.StackAccessException; -import org.apache.ambari.server.controller.internal.Stack; -import org.apache.ambari.server.state.StackId; - -/** - * Internal interface used to abstract out the process of creating the Stack object. - * - * This is used to simplify unit testing, since a new Factory can be provided to - * simulate various Stack or error conditions. 
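Back on the StackComponentResolver deletion above: its resolution loop narrows the candidate (StackId, service type) pairs with two optional filters before requiring exactly one match. The mpack filter in isolation, kept close to the deleted code (Pair is commons-lang3, Strings is Guava, both as imported by the deleted class):

import java.util.stream.Stream;

import org.apache.ambari.server.state.StackId;
import org.apache.commons.lang3.tuple.Pair;

import com.google.common.base.Strings;

class MpackFilterSketch {
  // keep only candidates whose stack name matches the component's declared mpack instance
  static Stream<Pair<StackId, String>> filterByMpackName(String mpackInstance, Stream<Pair<StackId, String>> candidates) {
    return Strings.isNullOrEmpty(mpackInstance)
        ? candidates
        : candidates.filter(pair -> pair.getLeft().getStackName().equals(mpackInstance));
  }
}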
- */ -public interface StackFactory { - Stack createStack(StackId stackId) throws StackAccessException; -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java index 81082aacf1c..51eedd31e23 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java @@ -51,6 +51,7 @@ import org.apache.ambari.server.controller.internal.ProvisionClusterRequest; import org.apache.ambari.server.controller.internal.RequestImpl; import org.apache.ambari.server.controller.internal.ScaleClusterRequest; +import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; import org.apache.ambari.server.controller.spi.RequestStatus; import org.apache.ambari.server.controller.spi.Resource; @@ -129,9 +130,6 @@ public class TopologyManager { //todo: currently only support a single cluster private Map clusterTopologyMap = new HashMap<>(); - @Inject - private Configuration configuration; - @Inject private StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor; @@ -158,9 +156,6 @@ public class TopologyManager { @Inject private TopologyValidatorService topologyValidatorService; - @Inject - private ComponentResolver resolver; - /** * A boolean not cached thread-local (volatile) to prevent double-checked * locking on the synchronized keyword. @@ -211,7 +206,7 @@ private void ensureInitialized() { // ensure KERBEROS_CLIENT is present in each hostgroup even if it's not in original BP for(ClusterTopology clusterTopology : clusterTopologyMap.values()) { if (clusterTopology.isClusterKerberosEnabled()) { - clusterTopology.getBlueprint().ensureKerberosClientIsPresent(); + addKerberosClient(clusterTopology); } } isInitialized = true; @@ -239,12 +234,12 @@ public void onRequestFinished(RequestFinishedEvent event) { if(isLogicalRequestSuccessful(provisionRequest)) { LOG.info("Cluster creation request id={} using Blueprint {} successfully completed for cluster id={}", clusterProvisionWithBlueprintCreateRequests.get(event.getClusterId()).getRequestId(), - clusterTopologyMap.get(event.getClusterId()).getBlueprintName(), + clusterTopologyMap.get(event.getClusterId()).getBlueprint().getName(), event.getClusterId()); } else { LOG.info("Cluster creation request id={} using Blueprint {} failed for cluster id={}", clusterProvisionWithBlueprintCreateRequests.get(event.getClusterId()).getRequestId(), - clusterTopologyMap.get(event.getClusterId()).getBlueprintName(), + clusterTopologyMap.get(event.getClusterId()).getBlueprint().getName(), event.getClusterId()); } } @@ -280,35 +275,55 @@ public boolean isClusterProvisionWithBlueprintFinished(long clusterId) { public RequestStatusResponse provisionCluster(final ProvisionClusterRequest request) throws InvalidTopologyException, AmbariException { ensureInitialized(); - BlueprintBasedClusterProvisionRequest provisionRequest = new BlueprintBasedClusterProvisionRequest(ambariContext, securityConfigurationFactory, request.getBlueprint(), request); - Map> resolved = resolver.resolveComponents(provisionRequest); - - final ClusterTopologyImpl topology = new ClusterTopologyImpl(ambariContext, provisionRequest, resolved); + final ClusterTopology topology = new ClusterTopologyImpl(ambariContext, request); final String clusterName = request.getClusterName(); + final 
StackDefinition stack = topology.getBlueprint().getStack(); final String repoVersion = request.getRepositoryVersion(); final Long repoVersionID = request.getRepositoryVersionId(); - final SecurityConfiguration securityConfiguration = provisionRequest.getSecurity(); - - topologyValidatorService.validateTopologyConfiguration(topology); // FIXME known stacks validation is too late here // get the id prior to creating ambari resources which increments the counter final Long provisionId = ambariContext.getNextRequestId(); + SecurityType securityType = null; + Credential credential = null; + + SecurityConfiguration securityConfiguration = processSecurityConfiguration(request); + + if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS) { + securityType = SecurityType.KERBEROS; + addKerberosClient(topology); + + // refresh default stack config after adding KERBEROS_CLIENT component to topology + topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getServices())); + + credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL); + if (credential == null) { + throw new InvalidTopologyException(KDC_ADMIN_CREDENTIAL + " is missing from request."); + } + } + + topologyValidatorService.validateTopologyConfiguration(topology); + + // create resources - ambariContext.createAmbariResources(topology, clusterName, securityConfiguration.getType(), repoVersion, repoVersionID); + ambariContext.createAmbariResources(topology, clusterName, securityType, repoVersion, repoVersionID); - if (securityConfiguration.getDescriptor() != null) { + if (securityConfiguration != null && securityConfiguration.getDescriptor() != null) { submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor()); } - if (securityConfiguration.getType() == SecurityType.KERBEROS) { - Credential credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL); + if (credential != null) { submitCredential(clusterName, credential); } long clusterId = ambariContext.getClusterId(clusterName); topology.setClusterId(clusterId); request.setClusterId(clusterId); + // set recommendation strategy + topology.setConfigRecommendationStrategy(request.getConfigRecommendationStrategy()); + // set provision action requested + topology.setProvisionAction(request.getProvisionAction()); + // create task executor for TopologyTasks getOrCreateTopologyTaskExecutor(clusterId); @@ -326,7 +341,7 @@ public LogicalRequest call() throws Exception { clusterTopologyMap.put(clusterId, topology); addClusterConfigRequest(logicalRequest, topology, new ClusterConfigurationRequest(ambariContext, topology, true, - stackAdvisorBlueprintProcessor, securityConfiguration.getType() == SecurityType.KERBEROS)); + stackAdvisorBlueprintProcessor, securityType == SecurityType.KERBEROS)); // Process the logical request processRequest(request, topology, logicalRequest); @@ -334,7 +349,7 @@ public LogicalRequest call() throws Exception { //todo: this should be invoked as part of a generic lifecycle event which could possibly //todo: be tied to cluster state - StackId stackId = Iterables.getFirst(topology.getStackIds(), null); // FIXME need for stackId in ClusterRequest will be removed + StackId stackId = Iterables.getFirst(topology.getBlueprint().getStackIds(), null); // FIXME need for stackId in ClusterRequest will be removed ambariContext.persistInstallStateForUI(clusterName, stackId); clusterProvisionWithBlueprintCreateRequests.put(clusterId, logicalRequest); 
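A condensed view of the security branch restored in provisionCluster above, with all names taken from the diff and unrelated steps elided:

SecurityConfiguration securityConfiguration = processSecurityConfiguration(request);
if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS) {
  securityType = SecurityType.KERBEROS;
  addKerberosClient(topology);  // KERBEROS_CLIENT is added to every host group
  // refresh the default stack config, since adding the client changed the topology
  topology.getBlueprint().getConfiguration().setParentConfiguration(
      stack.getConfiguration(topology.getBlueprint().getServices()));
  credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
  if (credential == null) {
    throw new InvalidTopologyException(KDC_ADMIN_CREDENTIAL + " is missing from request.");
  }
}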
return getRequestStatus(logicalRequest.getRequestId()); @@ -406,10 +421,33 @@ private void submitCredential(String clusterName, Credential credential) { } + /** + * Retrieve security info from Blueprint if missing from Cluster Template request. + * + * @param request + * @return + */ + private SecurityConfiguration processSecurityConfiguration(ProvisionClusterRequest request) { + LOG.debug("Getting security configuration from the request ..."); + SecurityConfiguration securityConfiguration = request.getSecurityConfiguration(); + + if (securityConfiguration == null) { + // todo - perform this logic at request creation instead! + LOG.debug("There's no security configuration in the request, retrieving it from the associated blueprint"); + securityConfiguration = request.getBlueprint().getSecurity(); + if (securityConfiguration != null && securityConfiguration.getType() == SecurityType.KERBEROS && + securityConfiguration.getDescriptorReference() != null) { + securityConfiguration = securityConfigurationFactory.loadSecurityConfigurationByReference + (securityConfiguration.getDescriptorReference()); + } + } + return securityConfiguration; + } + private void submitKerberosDescriptorAsArtifact(String clusterName, String descriptor) { ResourceProvider artifactProvider = - AmbariContext.getClusterController().ensureResourceProvider(Resource.Type.Artifact); + ambariContext.getClusterController().ensureResourceProvider(Resource.Type.Artifact); Map properties = new HashMap<>(); properties.put(ArtifactResourceProvider.ARTIFACT_NAME_PROPERTY, "kerberos_descriptor"); @@ -813,6 +851,8 @@ private void processRequest(TopologyRequest request, ClusterTopology topology, f LOG.info("TopologyManager.processRequest: Entering"); + finalizeTopology(request, topology); + boolean requestHostComplete = false; //todo: overall synchronization. Currently we have nested synchronization here @@ -1043,10 +1083,25 @@ private boolean isLogicalRequestSuccessful(LogicalRequest logicalRequest) { return logicalRequest != null && logicalRequest.isSuccessful(); } + //todo: this should invoke a callback on each 'service' in the topology + private void finalizeTopology(TopologyRequest request, ClusterTopology topology) { + } + private boolean isHostIgnored(String host) { return hostsToIgnore.remove(host); } + /** + * Add the kerberos client to groups if kerberos is enabled for the cluster. + * + * @param topology cluster topology + */ + private void addKerberosClient(ClusterTopology topology) { + for (HostGroup group : topology.getBlueprint().getHostGroups().values()) { + group.addComponent(new Component("KERBEROS_CLIENT")); + } + } + /** * Register the configuration task which is responsible for configuration topology resolution * and setting the updated configuration on the cluster. This task needs to be submitted to the diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java index ffdf1c79ef5..bd5630b9875 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java @@ -18,12 +18,7 @@ package org.apache.ambari.server.topology; -import static java.util.Collections.emptySet; - import java.util.Map; -import java.util.Set; - -import org.apache.ambari.server.state.StackId; /** * A request which is used to create or modify a cluster topology. 
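The processSecurityConfiguration helper added above resolves security settings request-first: (1) the cluster creation template, (2) the blueprint, (3) for a Kerberos configuration that only carries a descriptor reference, the referenced configuration loaded via securityConfigurationFactory. The same chain, condensed from the diff:

SecurityConfiguration sc = request.getSecurityConfiguration();
if (sc == null) {
  // fall back to the blueprint's security block
  sc = request.getBlueprint().getSecurity();
  if (sc != null && sc.getType() == SecurityType.KERBEROS && sc.getDescriptorReference() != null) {
    // resolve a referenced Kerberos descriptor into a concrete configuration
    sc = securityConfigurationFactory.loadSecurityConfigurationByReference(sc.getDescriptorReference());
  }
}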
@@ -48,6 +43,10 @@ enum Type { PROVISION, SCALE, EXPORT } */ Type getType(); + //todo: only a single BP may be specified so all host groups have the same bp. + //todo: BP really needs to be associated with the HostGroupInfo, even for create which will have a single BP + //todo: for all HG's. + /** * Get the blueprint instance associated with the request. * @@ -75,11 +74,4 @@ enum Type { PROVISION, SCALE, EXPORT } * @return string description of the request */ String getDescription(); - - /** - * @return a set of stack id's if supported by the TopologyRequest. - */ - default Set getStackIds() { - return emptySet(); - } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java index 136be393d48..751e2d7a851 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactory.java @@ -28,6 +28,6 @@ */ public interface TopologyRequestFactory { - ProvisionClusterRequest createProvisionClusterRequest(String rawRequestBody, Map properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException; + ProvisionClusterRequest createProvisionClusterRequest(Map properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException; // todo: use to create other request types } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactoryImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactoryImpl.java index 50d3fa1226e..ff79f526ea6 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactoryImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestFactoryImpl.java @@ -27,9 +27,9 @@ * Factory for creating topology requests. */ public class TopologyRequestFactoryImpl implements TopologyRequestFactory { - @Override - public ProvisionClusterRequest createProvisionClusterRequest(String rawRequestBody, Map properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException { - return new ProvisionClusterRequest(rawRequestBody, properties, securityConfiguration); + public ProvisionClusterRequest createProvisionClusterRequest(Map properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException { + return new ProvisionClusterRequest(properties, securityConfiguration); + } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestUtil.java deleted file mode 100644 index 1b160a41c36..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequestUtil.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ambari.server.topology; - -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.stream.Collectors.toSet; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.utils.JsonUtils; - -import com.fasterxml.jackson.core.type.TypeReference; - -/** - * Utility functions for topology requests. - */ -public class TopologyRequestUtil { - - public static final String NAME = "name"; - public static final String VERSION = "version"; - - - /** - * @param rawRequestJson The topology request in raw JSON format. Null input is handled gracefully. - * @return a Set of stack id's contained in the request - */ - public static Set getStackIdsFromRequest(String rawRequestJson) { - return getStackIdsFromRequest(getPropertyMap(rawRequestJson)); - } - - - /** - * @param rawRequestMap The topology request in raw JSON format. Null input is handled gracefully. - * @return a Set of stack id's contained in the request - */ - public static Set getStackIdsFromRequest(Map rawRequestMap) { - List> mpackInstances = (List>) - rawRequestMap.getOrDefault("mpack_instances", emptyList()); - return mpackInstances.stream().map(m -> { - checkArgument(m.containsKey(NAME), "Missing mpack name"); - checkArgument(m.containsKey(VERSION), "Missing mpack version"); - return new StackId(m.get(NAME), m.get(VERSION)); - }).collect(toSet()); - } - - /** - * @param rawRequestJson The topology request in raw JSON format. Null input is handled gracefully. - * @return the request body parsed as map (null is parsed as empty map) - */ - public static Map getPropertyMap(String rawRequestJson) { - return null == rawRequestJson ? - emptyMap() : - JsonUtils.fromJson(rawRequestJson, new TypeReference>() {}); - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java similarity index 84% rename from ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidator.java rename to ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java index 41b4ad5561b..c3e4b2bfa4c 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyValidator.java @@ -16,10 +16,7 @@ * limitations under the License. */ -package org.apache.ambari.server.topology.validators; - -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; +package org.apache.ambari.server.topology; /** * Performs topology validation. 
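With TopologyValidator moved back to the base topology package, a minimal implementation looks like the following. The validator below is a hypothetical example for orientation, not part of the patch; it assumes only the validate signature shown in this diff and the getHostGroupInfo accessor used elsewhere in it:

package org.apache.ambari.server.topology;

/** Hypothetical example: reject a topology that declares no host groups. */
public class NonEmptyTopologyValidator implements TopologyValidator {
  @Override
  public void validate(ClusterTopology topology) throws InvalidTopologyException {
    if (topology.getHostGroupInfo().isEmpty()) {
      throw new InvalidTopologyException("Topology must declare at least one host group.");
    }
  }
}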
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java index 4ae136b6dba..74fb9b1c897 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java @@ -17,7 +17,13 @@ */ package org.apache.ambari.server.topology.tasks; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + import org.apache.ambari.server.topology.ClusterTopology; +import org.apache.ambari.server.topology.HostGroup; import org.apache.ambari.server.topology.HostRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,10 +47,16 @@ public Type getType() { @Override public void runTask() { - String hostName = hostRequest.getHostName(); - LOG.info("HostRequest: Executing RESOURCE_CREATION task for host: {}", hostName); - long clusterId = hostRequest.getClusterId(); - clusterTopology.getAmbariContext().createAmbariHostResources(clusterId, hostName, clusterTopology.getComponentsInHostGroup(hostRequest.getHostgroupName())); - LOG.info("HostRequest: Exiting RESOURCE_CREATION task for host: {}", hostName); + LOG.info("HostRequest: Executing RESOURCE_CREATION task for host: {}", hostRequest.getHostName()); + + HostGroup group = hostRequest.getHostGroup(); + Map> serviceComponents = new HashMap<>(); + for (String service : group.getServices()) { + serviceComponents.put(service, new HashSet<>(group.getComponentNames(service))); + } + clusterTopology.getAmbariContext().createAmbariHostResources(hostRequest.getClusterId(), + hostRequest.getHostName(), serviceComponents); + + LOG.info("HostRequest: Exiting RESOURCE_CREATION task for host: {}", hostRequest.getHostName()); } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BasicBlueprintValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BasicBlueprintValidator.java deleted file mode 100644 index 82ea759f457..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/BasicBlueprintValidator.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
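On the PersistHostResourcesTask change above: the restored runTask materializes a service to component-names map from the host group before calling createAmbariHostResources. The grouping in isolation, with a hypothetical stub standing in for the slice of HostGroup the task uses:

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

class ServiceComponentGroupingSketch {
  // hypothetical stub for the HostGroup methods used by the task
  interface Group {
    Collection<String> getServices();
    Collection<String> getComponentNames(String service);
  }

  static Map<String, Collection<String>> byService(Group group) {
    Map<String, Collection<String>> serviceComponents = new HashMap<>();
    for (String service : group.getServices()) {
      serviceComponents.put(service, new HashSet<>(group.getComponentNames(service)));
    }
    return serviceComponents;
  }
}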
- */ -package org.apache.ambari.server.topology.validators; - -import org.apache.ambari.server.topology.Blueprint; - -public class BasicBlueprintValidator implements BlueprintValidator { - - @Override - public void validate(Blueprint blueprint) throws IllegalArgumentException { - // FIXME implement - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java index 0a10039b6a1..8bcbcffbdc5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ChainedTopologyValidator.java @@ -32,18 +32,19 @@ import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.InvalidTopologyException; +import org.apache.ambari.server.topology.TopologyValidator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Topology validator wrapper implementation. Executes a set of validations by calling a preconfigured set of validator implementations. + * Topology validator wrapper implementation. Executes a set of validations by calling a preconfigured set of validator implementations. */ public class ChainedTopologyValidator implements TopologyValidator { private static final Logger LOGGER = LoggerFactory.getLogger(ChainedTopologyValidator.class); - private final List<TopologyValidator> validators; + private List<TopologyValidator> validators; - ChainedTopologyValidator(List<TopologyValidator> validators) { + public ChainedTopologyValidator(List<TopologyValidator> validators) { this.validators = validators; } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java index 25748695943..0170186d390 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java @@ -18,6 +18,7 @@ import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.InvalidTopologyException; +import org.apache.ambari.server.topology.TopologyValidator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +34,7 @@ public class ClusterConfigTypeValidator implements TopologyValidator { public void validate(ClusterTopology topology) throws InvalidTopologyException { // config types in from the request / configuration is always set in the request instance - Set<String> topologyClusterConfigTypes = new HashSet<>(topology.getConfiguration().getAllConfigTypes()); + Set<String> topologyClusterConfigTypes = new HashSet<String>(topology.getConfiguration().getAllConfigTypes()); LOGGER.debug("Cluster config types: {}", topologyClusterConfigTypes); if (topologyClusterConfigTypes.isEmpty()) { @@ -43,8 +44,8 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { // collecting all config types for services in the blueprint (from the related stack) Set<String> stackServiceConfigTypes = new HashSet<>(); - for (String serviceName : topology.getServices()) { - stackServiceConfigTypes.addAll(topology.getStack().getConfigurationTypes(serviceName)); + for (String serviceName : topology.getBlueprint().getServices()) { + stackServiceConfigTypes.addAll(topology.getBlueprint().getStack().getConfigurationTypes(serviceName)); }
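The hunk above stops just before the identification step; as the next context line notes, the validator then identifies invalid config types. That check is a set difference, sketched here under the assumption (consistent with this class's javadoc and exception type) that a non-empty difference fails validation:

import java.util.HashSet;
import java.util.Set;

class ConfigTypeDifferenceSketch {
  // config types referenced by the topology that none of the blueprint's services define
  static Set<String> invalidTypes(Set<String> topologyConfigTypes, Set<String> stackServiceConfigTypes) {
    Set<String> invalid = new HashSet<>(topologyConfigTypes);
    invalid.removeAll(stackServiceConfigTypes);
    return invalid;  // caller raises InvalidTopologyException if non-empty
  }
}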
// identifying invalid config types diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/GplPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/GplPropertiesValidator.java deleted file mode 100644 index 5ce4940fe50..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/GplPropertiesValidator.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology.validators; - -import java.util.Map; -import java.util.Set; - -import javax.inject.Inject; - -import org.apache.ambari.server.configuration.Configuration; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.GPLLicenseNotAcceptedException; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.ImmutableSet; - -/** - * Properties that indicate usage of GPL software are - * allowed with explicit approval from user. - */ -public class GplPropertiesValidator implements TopologyValidator { - - private static final Logger LOG = LoggerFactory.getLogger(GplPropertiesValidator.class); - - private static final String CORE_SITE = "core-site"; - static final String LZO_CODEC_CLASS_PROPERTY_NAME = "io.compression.codec.lzo.class"; - static final String CODEC_CLASSES_PROPERTY_NAME = "io.compression.codecs"; - static final String LZO_CODEC_CLASS = "com.hadoop.compression.lzo.LzoCodec"; - - private static final Set PROPERTY_NAMES = ImmutableSet.of( - LZO_CODEC_CLASS_PROPERTY_NAME, - CODEC_CLASSES_PROPERTY_NAME - ); - - private static final String GPL_LICENSE_ERROR_MESSAGE = - "Your Ambari server has not been configured to download LZO GPL software. 
" + - "Please refer to documentation to configure Ambari before proceeding."; - - private final Configuration configuration; - - @Inject - public GplPropertiesValidator(Configuration configuration) { - this.configuration = configuration; - } - - @Override - public void validate(ClusterTopology topology) throws InvalidTopologyException { - // need to reject blueprints that have LZO enabled if the Ambari Server hasn't been configured for it - boolean gplEnabled = configuration.getGplLicenseAccepted(); - - if (gplEnabled) { - LOG.info("GPL license accepted, skipping config check"); - return; - } - - // we don't want to include default stack properties so we can't use full properties - Map> clusterConfigurations = topology.getConfiguration().getProperties(); - - if (clusterConfigurations != null) { - Map properties = clusterConfigurations.get(CORE_SITE); - if (properties != null) { - for (String propertyName : PROPERTY_NAMES) { - String propertyValue = properties.get(propertyName); - if (propertyValue != null && propertyValue.contains(LZO_CODEC_CLASS)) { - throw new GPLLicenseNotAcceptedException(GPL_LICENSE_ERROR_MESSAGE); - } - } - } - } - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java index 6ba9a2faf8f..9d9ad00a35b 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java @@ -17,6 +17,7 @@ import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.Configuration; import org.apache.ambari.server.topology.InvalidTopologyException; +import org.apache.ambari.server.topology.TopologyValidator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,13 +31,14 @@ public class HiveServiceValidator implements TopologyValidator { private static final String HIVE_DB_DEFAULT = "New MySQL Database"; private static final String HIVE_DB_PROPERTY = "hive_database"; private static final String MYSQL_SERVER_COMPONENT = "MYSQL_SERVER"; - private static final String HIVE_SERVICE = "HIVE"; + public static final String HIVE_SERVICE = "HIVE"; @Override public void validate(ClusterTopology topology) throws InvalidTopologyException { + // there is no hive configured in the blueprint, nothing to do (does the validator apply?) 
- if (!topology.getServices().contains(HIVE_SERVICE)) { + if (!topology.getBlueprint().getServices().contains(HIVE_SERVICE)) { LOGGER.info(" [{}] service is not listed in the blueprint, skipping hive service validation.", HIVE_SERVICE); return; } @@ -50,22 +52,16 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { throw new InvalidTopologyException(errorMessage); } - boolean hiveWantsMysql = HIVE_DB_DEFAULT.equals(clusterConfiguration.getPropertyValue(HIVE_ENV, HIVE_DB_PROPERTY)); - boolean topologyContainsMysql = topology.getComponents() - .anyMatch(c -> MYSQL_SERVER_COMPONENT.equals(c.componentName()) && HIVE_SERVICE.equals(c.effectiveServiceName())); - - if (topologyContainsMysql && !hiveWantsMysql) { - String errorMessage = String.format( - "Incorrect configuration: %s component is specified in blueprint, but Hive is configured to use existing DB", - MYSQL_SERVER_COMPONENT); - LOGGER.error(errorMessage); - throw new InvalidTopologyException(errorMessage); + // hive database has custom configuration, skipping validation + if (!HIVE_DB_DEFAULT.equals(clusterConfiguration.getPropertyValue(HIVE_ENV, HIVE_DB_PROPERTY))) { + LOGGER.info("Custom hive database settings detected. HIVE service validation succeeded."); + return; } - if (hiveWantsMysql && !topologyContainsMysql) { - String errorMessage = String.format( - "The component %s must explicitly be specified in the blueprint if Hive database is configured with %s.", - MYSQL_SERVER_COMPONENT, HIVE_DB_DEFAULT); + // hive database settings need the mysql-server component in the blueprint + if (!topology.getBlueprint().getComponentNames(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)) { + String errorMessage = String.format("Component [%s] must explicitly be set in the blueprint when hive database " + + "is configured with the current settings. HIVE service validation failed.", MYSQL_SERVER_COMPONENT); LOGGER.error(errorMessage); throw new InvalidTopologyException(errorMessage); } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/NameNodeHighAvailabilityValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/NameNodeHighAvailabilityValidator.java deleted file mode 100644 index fbfb03da890..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/NameNodeHighAvailabilityValidator.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
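HiveServiceValidator's restored logic amounts to early returns plus one requirement: when hive_database is left at its "New MySQL Database" default, the blueprint must place MYSQL_SERVER under HIVE. Condensed using the constants from the diff (HIVE_ENV is assumed to name the hive-env config type):

// inside validate(), after the early returns for a missing HIVE service or hive-env config
if (HIVE_DB_DEFAULT.equals(clusterConfiguration.getPropertyValue(HIVE_ENV, HIVE_DB_PROPERTY))
    && !topology.getBlueprint().getComponentNames(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)) {
  throw new InvalidTopologyException(
      "Component [" + MYSQL_SERVER_COMPONENT + "] must explicitly be set in the blueprint when hive database "
      + "is configured with the current settings.");
}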
- */ - -package org.apache.ambari.server.topology.validators; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor; -import org.apache.ambari.server.topology.Blueprint; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.HostGroup; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Verifies that dfs_ha_initial_namenode_active and dfs_ha_initial_namenode_standby properties - * in hadoop-env reference host groups with NAMENODE components. - */ -public class NameNodeHighAvailabilityValidator implements TopologyValidator { - - private static final Logger LOG = LoggerFactory.getLogger(NameNodeHighAvailabilityValidator.class); - - @Override - public void validate(ClusterTopology topology) throws InvalidTopologyException { - Blueprint blueprint = topology.getBlueprint(); - - Map> clusterConfigurations = topology.getConfiguration().getProperties(); - - if (!BlueprintConfigurationProcessor.isNameNodeHAEnabled(clusterConfigurations)) { - LOG.info("NAMENODE HA is not enabled, skipping validation of {}", blueprint.getName()); - return; - } - - LOG.info("Validating NAMENODE HA for blueprint: {}", blueprint.getName()); - - List hostGroupsForComponent = new ArrayList<>(topology.getHostGroupsForComponent("NAMENODE")); - - for (HostGroup hostGroup : topology.getBlueprint().getHostGroups().values()) { - Map> operationalConfiguration = new HashMap<>(clusterConfigurations); - - operationalConfiguration.putAll(hostGroup.getConfiguration().getProperties()); - if (hostGroup.getComponentNames().contains("NAMENODE")) { - Map hadoopEnvConfig = operationalConfiguration.get("hadoop-env"); - if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) { - Set givenHostGroups = new HashSet<>(); - givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")); - givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")); - if(givenHostGroups.size() != hostGroupsForComponent.size()) { - throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent); - } - if (BlueprintConfigurationProcessor.HOST_GROUP_PLACEHOLDER_PATTERN.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && BlueprintConfigurationProcessor.HOST_GROUP_PLACEHOLDER_PATTERN.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches()) { - for (String hostGroupForComponent : hostGroupsForComponent) { - givenHostGroups.removeIf(s -> s.contains(hostGroupForComponent)); - } - } - - if(!givenHostGroups.isEmpty()){ - throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. 
Expected Host groups are :" + hostGroupsForComponent); - } - } - } - } - } - -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownComponents.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownComponents.java deleted file mode 100644 index e41fe2708ff..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownComponents.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology.validators; - -import static java.util.stream.Collectors.joining; - -import org.apache.ambari.server.controller.RootComponent; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.topology.ResolvedComponent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class RejectUnknownComponents implements TopologyValidator { - - private static final Logger LOG = LoggerFactory.getLogger(RejectUnknownComponents.class); - - @Override - public void validate(ClusterTopology topology) throws InvalidTopologyException { - String unknownComponents = topology.getComponents() - .map(ResolvedComponent::componentName) - .filter(c -> !RootComponent.AMBARI_SERVER.name().equals(c)) - .filter(c -> !topology.getStack().getComponents().contains(c)) - .collect(joining(", ")); - - if (!unknownComponents.isEmpty()) { - String msg = "The following components are not valid for the specified stacks: " + unknownComponents; - LOG.info(msg); - throw new InvalidTopologyException(msg); - } - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownStacks.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownStacks.java deleted file mode 100644 index 9821bc8582a..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RejectUnknownStacks.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology.validators; - -import static java.util.stream.Collectors.joining; - -import javax.inject.Provider; - -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; - -/** - * Verifies that the topology only references known stacks. - */ -public class RejectUnknownStacks implements TopologyValidator { - - private final Provider<AmbariMetaInfo> metaInfo; - - RejectUnknownStacks(Provider<AmbariMetaInfo> metaInfo) { - this.metaInfo = metaInfo; - } - - @Override - public void validate(ClusterTopology topology) throws InvalidTopologyException { - String unknownStacks = topology.getStackIds().stream() - .filter(stackId -> !metaInfo.get().isKnownStack(stackId)) - .sorted() - .map(Object::toString) - .collect(joining(", ")); - - if (!unknownStacks.isEmpty()) { - throw new InvalidTopologyException("Unknown stacks found in cluster creation request: " + unknownStacks); - } - } -} diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java index 5102fb97a74..4022fcba1e4 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java @@ -23,9 +23,11 @@ import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.state.PropertyInfo; +import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.HostGroup; import org.apache.ambari.server.topology.InvalidTopologyException; +import org.apache.ambari.server.topology.TopologyValidator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +44,7 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator { /** * Validates the configuration coming from the blueprint and cluster creation template and ensures that all the required properties are provided. - * It's expected, that a in hostgroup containing components for a given service all required configuration for the given service is available. + * It is expected that in a host group containing components for a given service, all required configuration for that service is available.
* * @param topology the topology instance holding the configuration for cluster provisioning * @throws InvalidTopologyException when there are missing configuration types or properties related to services in the blueprint @@ -51,38 +53,45 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator { public void validate(ClusterTopology topology) throws InvalidTopologyException { // collect required properties - Map>> requiredPropertiesByService = getRequiredPropertiesByService(topology); + Map>> requiredPropertiesByService = getRequiredPropertiesByService(topology.getBlueprint()); // find missing properties in the cluster configuration Map> missingProperties = new TreeMap<>(); Map> topologyConfiguration = new HashMap<>(topology.getConfiguration().getFullProperties(1)); - for (Map.Entry>> entry : requiredPropertiesByService.entrySet()) { - String service = entry.getKey(); - Map> requiredPropertiesByType = entry.getValue(); + for (HostGroup hostGroup : topology.getBlueprint().getHostGroups().values()) { + LOGGER.debug("Processing hostgroup configurations for hostgroup: {}", hostGroup.getName()); - for (HostGroup hostGroup : topology.getBlueprint().getHostGroups().values()) { - LOGGER.debug("Processing configurations for service {} in hostgroup {}", service, hostGroup.getName()); + // copy of all configurations available in the topology hgConfig -> topologyConfig -> bpConfig + Map> operationalConfigurations = new HashMap<>(topologyConfiguration); - // copy of all configurations available in the topology hgConfig -> topologyConfig -> bpConfig - Map> operationalConfigurations = new HashMap<>(topologyConfiguration); + for (Map.Entry> hostgroupConfigEntry : hostGroup.getConfiguration().getProperties().entrySet()) { + if (operationalConfigurations.containsKey(hostgroupConfigEntry.getKey())) { + operationalConfigurations.get(hostgroupConfigEntry.getKey()).putAll(hostgroupConfigEntry.getValue()); + } else { + operationalConfigurations.put(hostgroupConfigEntry.getKey(), hostgroupConfigEntry.getValue()); + } + } - for (Map.Entry> hostgroupConfigEntry : hostGroup.getConfiguration().getProperties().entrySet()) { - if (operationalConfigurations.containsKey(hostgroupConfigEntry.getKey())) { - operationalConfigurations.get(hostgroupConfigEntry.getKey()).putAll(hostgroupConfigEntry.getValue()); - } else { - operationalConfigurations.put(hostgroupConfigEntry.getKey(), hostgroupConfigEntry.getValue()); - } + for (String hostGroupService : hostGroup.getServices()) { + + if (!requiredPropertiesByService.containsKey(hostGroupService)) { + // there are no required properties for the service + LOGGER.debug("There are no required properties found for hostgroup/service: [{}/{}]", hostGroup.getName(), hostGroupService); + continue; } + Map> requiredPropertiesByType = requiredPropertiesByService.get(hostGroupService); + for (String configType : requiredPropertiesByType.keySet()) { // We need a copy not to modify the original - Collection requiredPropertiesForType = new HashSet<>(requiredPropertiesByType.get(configType)); + Collection requiredPropertiesForType = new HashSet( + requiredPropertiesByType.get(configType)); if (!operationalConfigurations.containsKey(configType)) { // all required configuration is missing for the config type - missingProperties = addToMissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType); + missingProperties = addTomissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType); continue; } @@ -91,7 +100,7 @@ public void 
validate(ClusterTopology topology) throws InvalidTopologyException { if (!requiredPropertiesForType.isEmpty()) { LOGGER.info("Found missing properties in hostgroup: {}, config type: {}, mising properties: {}", hostGroup.getName(), configType, requiredPropertiesForType); - missingProperties = addToMissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType); + missingProperties = addTomissingProperties(missingProperties, hostGroup.getName(), requiredPropertiesForType); } } } @@ -110,17 +119,18 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { * Collects required properties for services in the blueprint. Configuration properties are returned by configuration type. * service -> configType -> properties * + * @param blueprint the blueprint from the cluster topology * @return a map with configuration types mapped to collections of required property names */ - private Map>> getRequiredPropertiesByService(ClusterTopology topology) { + private Map>> getRequiredPropertiesByService(Blueprint blueprint) { Map>> requiredPropertiesForServiceByType = new HashMap<>(); - for (String bpService : topology.getServices()) { + for (String bpService : blueprint.getServices()) { LOGGER.debug("Collecting required properties for the service: {}", bpService); - Collection requiredConfigsForService = topology.getStack().getRequiredConfigurationProperties(bpService); + Collection requiredConfigsForService = blueprint.getStack().getRequiredConfigurationProperties(bpService); Map> requiredPropertiesByConfigType = new HashMap<>(); for (Stack.ConfigProperty configProperty : requiredConfigsForService) { @@ -159,7 +169,7 @@ private Map>> getRequiredPropertiesByServ } - private Map> addToMissingProperties(Map> missingProperties, String hostGroup, Collection values) { + private Map> addTomissingProperties(Map> missingProperties, String hostGroup, Collection values) { Map> missing; if (missingProperties == null) { diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java index b3a543032f9..5c9e0eb6555 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java @@ -1,3 +1,5 @@ +package org.apache.ambari.server.topology.validators; + /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -12,30 +14,33 @@ * limitations under the License. 
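One more review note on RequiredConfigPropertiesValidator above: per host group it layers host-group properties over the topology-wide ones. Since new HashMap<>(topologyConfiguration) is only a shallow copy, the putAll branch mutates inner maps that are shared across host-group iterations; whether that is harmless depends on getFullProperties returning defensive copies, which is worth double-checking. A variant of the merge that copies the per-type maps (illustrative names):

import java.util.HashMap;
import java.util.Map;

class ConfigMergeSketch {
  static Map<String, Map<String, String>> merge(Map<String, Map<String, String>> topologyConfig,
                                                Map<String, Map<String, String>> hostGroupConfig) {
    Map<String, Map<String, String>> operational = new HashMap<>();
    // deep-copy the per-type maps so host-group overrides cannot leak back
    for (Map.Entry<String, Map<String, String>> e : topologyConfig.entrySet()) {
      operational.put(e.getKey(), new HashMap<>(e.getValue()));
    }
    for (Map.Entry<String, Map<String, String>> e : hostGroupConfig.entrySet()) {
      operational.computeIfAbsent(e.getKey(), k -> new HashMap<>()).putAll(e.getValue());
    }
    return operational;
  }
}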
*/ -package org.apache.ambari.server.topology.validators; - -import static java.util.stream.Collectors.groupingBy; -import static java.util.stream.Collectors.mapping; -import static java.util.stream.Collectors.toSet; - +import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; -import java.util.Set; import org.apache.ambari.server.controller.RootComponent; import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.controller.internal.StackDefinition; import org.apache.ambari.server.state.PropertyInfo; +import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.ClusterTopology; +import org.apache.ambari.server.topology.HostGroup; import org.apache.ambari.server.topology.HostGroupInfo; import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.topology.ResolvedComponent; +import org.apache.ambari.server.topology.TopologyValidator; /** * Validates that all required passwords are provided. */ public class RequiredPasswordValidator implements TopologyValidator { + // todo remove the field as all the information is available in the topology being validated + private String defaultPassword; + + public RequiredPasswordValidator() { + } + /** * Validate that all required password properties have been set or that 'default_password' is specified. * @@ -44,7 +49,8 @@ public class RequiredPasswordValidator implements TopologyValidator { */ public void validate(ClusterTopology topology) throws InvalidTopologyException { - Map>> missingPasswords = validateRequiredPasswords(topology); + defaultPassword = topology.getDefaultPassword(); + Map>> missingPasswords = validateRequiredPasswords(topology); if (! missingPasswords.isEmpty()) { throw new InvalidTopologyException("Missing required password properties. Specify a value for these " + @@ -62,45 +68,94 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException { * * @throws IllegalArgumentException if blueprint contains invalid information */ - private Map>> validateRequiredPasswords(ClusterTopology topology) { - Map>> missingProperties = new HashMap<>(); - StackDefinition stack = topology.getStack(); - String defaultPassword = topology.getDefaultPassword(); - boolean hasDefaultPassword = defaultPassword != null && !defaultPassword.trim().isEmpty(); + //todo: this is copied/pasted from Blueprint and is currently only used by validatePasswordProperties() + //todo: seems that we should have some common place for this code so it can be used by BP and here? 
+ private Map>> validateRequiredPasswords(ClusterTopology topology) { + + Map>> missingProperties = + new HashMap<>(); for (Map.Entry groupEntry: topology.getHostGroupInfo().entrySet()) { String hostGroupName = groupEntry.getKey(); Map> groupProperties = groupEntry.getValue().getConfiguration().getFullProperties(3); - Map> missingPropertiesInHostGroup = topology.getComponentsInHostGroup(hostGroupName) - .filter(component -> !RootComponent.AMBARI_SERVER.name().equals(component.componentName())) - .map(ResolvedComponent::serviceType) - .distinct() - .flatMap(serviceType -> stack.getRequiredConfigurationProperties(serviceType, PropertyInfo.PropertyType.PASSWORD).stream()) - .filter(property -> !propertyExists(groupProperties, property.getType(), property.getName())) - .collect(groupingBy(Stack.ConfigProperty::getType, mapping(Stack.ConfigProperty::getName, toSet()))); - - if (!missingPropertiesInHostGroup.isEmpty()) { - if (hasDefaultPassword) { - for (Map.Entry> entry : missingPropertiesInHostGroup.entrySet()) { - String type = entry.getKey(); - for (String name : entry.getValue()) { - topology.getConfiguration().setProperty(type, name, defaultPassword); + Collection processedServices = new HashSet<>(); + Blueprint blueprint = topology.getBlueprint(); + StackDefinition stack = blueprint.getStack(); + + HostGroup hostGroup = blueprint.getHostGroup(hostGroupName); + for (String component : hostGroup.getComponentNames()) { + //for now, AMBARI is not recognized as a service in Stacks + if (component.equals(RootComponent.AMBARI_SERVER.name())) { + continue; + } + + String serviceName = stack.getServiceForComponent(component); + if (processedServices.add(serviceName)) { + //todo: do I need to subtract excluded configs? + Collection requiredProperties = + stack.getRequiredConfigurationProperties(serviceName, PropertyInfo.PropertyType.PASSWORD); + + for (Stack.ConfigProperty property : requiredProperties) { + String category = property.getType(); + String name = property.getName(); + if (! propertyExists(topology, groupProperties, category, name)) { + Map> missingHostGroupPropsMap = missingProperties.get(hostGroupName); + if (missingHostGroupPropsMap == null) { + missingHostGroupPropsMap = new HashMap<>(); + missingProperties.put(hostGroupName, missingHostGroupPropsMap); + } + Collection missingHostGroupTypeProps = missingHostGroupPropsMap.get(category); + if (missingHostGroupTypeProps == null) { + missingHostGroupTypeProps = new HashSet<>(); + missingHostGroupPropsMap.put(category, missingHostGroupTypeProps); + } + missingHostGroupTypeProps.add(name); } } - } else { - missingProperties.put(hostGroupName, missingPropertiesInHostGroup); } } } return missingProperties; } - private boolean propertyExists(Map> props, String type, String property) { + private boolean propertyExists(ClusterTopology topology, Map> props, String type, String property) { Map typeProps = props.get(type); - return typeProps != null && typeProps.containsKey(property); + return (typeProps != null && typeProps.containsKey(property)) || setDefaultPassword(topology, type, property); } + /** + * Attempt to set the default password in cluster configuration for missing password property. + * + * @param configType configuration type + * @param property password property name + * + * @return true if password was set, otherwise false. 
Currently the password will always be set + * unless it is null + */ + private boolean setDefaultPassword(ClusterTopology topology, String configType, String property) { + boolean setDefaultPassword = false; + if (defaultPassword != null && ! defaultPassword.trim().isEmpty()) { + topology.getConfiguration().setProperty(configType, property, defaultPassword); + setDefaultPassword = true; + } + return setDefaultPassword; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RequiredPasswordValidator that = (RequiredPasswordValidator) o; + + return defaultPassword == null ? that.defaultPassword == null : defaultPassword.equals(that.defaultPassword); + } + + @Override + public int hashCode() { + return defaultPassword != null ? defaultPassword.hashCode() : 0; + } } diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java deleted file mode 100644 index 2e3c61b127e..00000000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology.validators; - -import java.util.Map; - -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.utils.SecretReference; - -/** - * Secret references are not allowed in blueprints. 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java
deleted file mode 100644
index 2e3c61b127e..00000000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/SecretReferenceValidator.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.topology.validators;
-
-import java.util.Map;
-
-import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.utils.SecretReference;
-
-/**
- * Secret references are not allowed in blueprints.
- * @see SecretReference
- */
-public class SecretReferenceValidator implements TopologyValidator {
-
-  @Override
-  public void validate(ClusterTopology topology) throws InvalidTopologyException {
-    // we don't want to include default stack properties so we can't use full properties
-    Map<String, Map<String, String>> clusterConfigurations = topology.getConfiguration().getProperties();
-
-    // we need to have real passwords, not references
-    if (clusterConfigurations != null) {
-      StringBuilder errorMessage = new StringBuilder();
-      boolean containsSecretReferences = false;
-      for (Map.Entry<String, Map<String, String>> configEntry : clusterConfigurations.entrySet()) {
-        String configType = configEntry.getKey();
-        Map<String, String> configEntryValue = configEntry.getValue();
-        if (configEntryValue != null) {
-          for (Map.Entry<String, String> propertyEntry : configEntryValue.entrySet()) {
-            String propertyName = propertyEntry.getKey();
-            String propertyValue = propertyEntry.getValue();
-            if (propertyValue != null && SecretReference.isSecret(propertyValue)) {
-              errorMessage.append(String.format(" Config:%s Property:%s\n", configType, propertyName));
-              containsSecretReferences = true;
-            }
-          }
-        }
-      }
-      if (containsSecretReferences) {
-        throw new InvalidTopologyException("Secret references are not allowed in blueprints, " +
-          "replace following properties with real passwords:\n" + errorMessage);
-      }
-    }
-  }
-
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
index 7166998b016..f028a31e1d0
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
@@ -19,6 +19,7 @@
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,7 +45,7 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException {
       return;
     }
-    Set<String> stackConfigTypes = new HashSet<>(topology.getStack().getConfiguration().getAllConfigTypes());
+    Set<String> stackConfigTypes = new HashSet<>(topology.getBlueprint().getStack().getConfiguration().getAllConfigTypes());
     // remove all "valid" config types from the incoming set
     incomingConfigTypes.removeAll(stackConfigTypes);
@@ -57,3 +58,7 @@ public void validate(ClusterTopology topology) throws InvalidTopologyException {
     }
   }
 }
+
+
+
+
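The StackConfigTypeValidator change above only swaps how the stack is looked up; the validation itself is a plain set difference between the blueprint's config types and those the stack defines. A minimal sketch of that idiom (the values are hypothetical; Set.of requires Java 9+):

import java.util.HashSet;
import java.util.Set;

public class UnknownConfigTypesSketch {
  public static void main(String[] args) {
    // Config types named in a blueprint, including one typo.
    Set<String> incomingConfigTypes = new HashSet<>(Set.of("core-site", "hdfs-site", "hdfs-stie"));
    // Config types the stack actually defines.
    Set<String> stackConfigTypes = Set.of("core-site", "hdfs-site", "yarn-site");

    incomingConfigTypes.removeAll(stackConfigTypes); // drop every type the stack knows
    if (!incomingConfigTypes.isEmpty()) {
      // The real validator throws InvalidTopologyException at this point.
      System.out.println("Unknown config types: " + incomingConfigTypes); // [hdfs-stie]
    }
  }
}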
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
index 1e81ef84b97..bc76bff1e7b
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
@@ -16,33 +16,16 @@
 import java.util.List;
-import javax.inject.Inject;
-import javax.inject.Provider;
-
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.topology.TopologyValidator;
 import com.google.common.collect.ImmutableList;
 public class TopologyValidatorFactory {
+  List<TopologyValidator> validators;
-  private final List<TopologyValidator> validators;
-
-  @Inject
-  public TopologyValidatorFactory(Provider<AmbariMetaInfo> metaInfo, Configuration config) {
-    validators = ImmutableList.<TopologyValidator>builder()
-      .add(new RejectUnknownStacks(metaInfo))
-      .add(new RejectUnknownComponents())
-      .add(new DependencyAndCardinalityValidator())
-      .add(new StackConfigTypeValidator())
-      .add(new GplPropertiesValidator(config))
-      .add(new SecretReferenceValidator())
-      .add(new RequiredConfigPropertiesValidator())
-      .add(new RequiredPasswordValidator())
-      .add(new HiveServiceValidator())
-      .add(new NameNodeHighAvailabilityValidator())
-      .add(new UnitValidator(UnitValidatedProperty.ALL))
-      .build();
+  public TopologyValidatorFactory() {
+    validators = ImmutableList.of(new RequiredConfigPropertiesValidator(), new RequiredPasswordValidator(), new HiveServiceValidator(),
+      new StackConfigTypeValidator(), new UnitValidator(UnitValidatedProperty.ALL));
   }
 public TopologyValidator createConfigurationValidatorChain() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
index 18e3913361a..31d5275d220
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
@@ -27,6 +27,7 @@
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
 /**
  * I validate the unit of properties by checking if it matches to the stack defined unit.
@@ -41,7 +42,7 @@ public UnitValidator(Set<UnitValidatedProperty> propertiesToBeValidated) {
   @Override
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
-    StackDefinition stack = topology.getStack();
+    StackDefinition stack = topology.getBlueprint().getStack();
     validateConfig(topology.getConfiguration().getFullProperties(), stack);
     for (HostGroupInfo hostGroup : topology.getHostGroupInfo().values()) {
       validateConfig(hostGroup.getConfiguration().getFullProperties(), stack);
diff --git a/ambari-server/src/main/python/bootstrap.py b/ambari-server/src/main/python/bootstrap.py
index af4162c768a..e278917aad7 100755
--- a/ambari-server/src/main/python/bootstrap.py
+++ b/ambari-server/src/main/python/bootstrap.py
@@ -44,7 +44,6 @@
 AMBARI_PASSPHRASE_VAR_NAME = "AMBARI_PASSPHRASE"
 HOST_BOOTSTRAP_TIMEOUT = 300
-HOST_CONNECTIVITY_TIMEOUT = 10
 # how many parallel bootstraps may be run at a time
 MAX_PARALLEL_BOOTSTRAPS = 20
 # How many seconds to wait between polling parallel bootstraps
@@ -208,7 +207,6 @@ def __init__(self, host, shared_state):
     log_file = os.path.join(self.shared_state.bootdir, self.host + ".log")
     self.host_log = HostLog(log_file)
     self.daemon = True
-    self.timeout = HOST_BOOTSTRAP_TIMEOUT
     if OSCheck.is_ubuntu_family():
       self.AMBARI_REPO_FILENAME = self.AMBARI_REPO_FILENAME + ".list"
@@ -254,9 +252,6 @@ def createDoneFile(self, retcode):
   def getStatus(self):
     return self.status
-  def getTimeout(self):
-    return self.timeout
-
   def interruptBootstrap(self):
     """
     Thread is not really interrupted (moreover, Python seems to have no any
@@ -798,47 +793,25 @@ def run(self):
     self.createDoneFile(last_retcode)
     self.status["return_code"] = last_retcode
-class ValidateHost(Bootstrap):
-  def __new__(cls, *args, **kwargs):
-    return object.__new__(ValidateHost)
-
-  def __init__(self, hosts, sharedState):
-    super(ValidateHost,
self).__init__(hosts, sharedState) - self.timeout = HOST_CONNECTIVITY_TIMEOUT - - def login(self): - params = self.shared_state - self.host_log.write("==========================\n") - self.host_log.write("Running login to host {0} ...".format(self.host)) - ssh = SSH(params.user, params.sshPort, params.sshkey_file, self.host, "exit", - params.bootdir, self.host_log) #login and exit immediately - retcode = ssh.run() - self.host_log.write("\n") - return retcode - def run(self): - self.timeout = HOST_CONNECTIVITY_TIMEOUT - self.status["start_time"] = time.time() - ret = self.try_to_execute(self.login) - retcode = ret["exitstatus"] - err_msg = ret["errormsg"] - std_out = ret["log"] - if retcode != 0: - message = "WARNING: Validation of host {0} fails because it finished with non-zero exit code ({1})\nERROR MESSAGE: {2}\nSTDOUT: {3}".format(self.host, retcode, err_msg, std_out) - self.host_log.write(message) - logging.error(message) - self.createDoneFile(retcode) - self.status["return_code"] = retcode class PBootstrap: """ BootStrapping the agents on a list of hosts""" def __init__(self, hosts, sharedState): self.hostlist = hosts self.sharedState = sharedState - self.validate = sharedState.validate pass - def run_bootstraps(self, queue): + def run_bootstrap(self, host): + bootstrap = Bootstrap(host, self.sharedState) + bootstrap.start() + return bootstrap + + def run(self): + """ Run up to MAX_PARALLEL_BOOTSTRAPS at a time in parallel """ + logging.info("Executing parallel bootstrap") + queue = list(self.hostlist) + queue.reverse() running_list = [] finished_list = [] while queue or running_list: # until queue is not empty or not all parallel bootstraps are @@ -849,10 +822,10 @@ def run_bootstraps(self, queue): else: starttime = bootstrap.getStatus()["start_time"] elapsedtime = time.time() - starttime - if elapsedtime > bootstrap.getTimeout(): + if elapsedtime > HOST_BOOTSTRAP_TIMEOUT: # bootstrap timed out - logging.warn("Host {0} timed out and will be " - "interrupted".format(bootstrap.host)) + logging.warn("Bootstrap at host {0} timed out and will be " + "interrupted".format(bootstrap.host)) bootstrap.interruptBootstrap() finished_list.append(bootstrap) # Remove finished from the running list @@ -861,24 +834,17 @@ def run_bootstraps(self, queue): free_slots = MAX_PARALLEL_BOOTSTRAPS - len(running_list) for i in range(free_slots): if queue: - next_bootstrap = queue.pop() - next_bootstrap.start() - running_list.append(next_bootstrap) + next_host = queue.pop() + bootstrap = self.run_bootstrap(next_host) + running_list.append(bootstrap) time.sleep(POLL_INTERVAL_SEC) - logging.info("Finished parallel {0}".format("connectivity validation" if self.validate else "bootstrap")) + logging.info("Finished parallel bootstrap") - def run(self): - """ Run up to MAX_PARALLEL_BOOTSTRAPS at a time in parallel """ - logging.info("Executing parallel {0}".format("connectivity validation" if self.validate else "bootstrap")) - queue = map(lambda host: ValidateHost(host, self.sharedState) if self.validate else Bootstrap(host, self.sharedState), list(self.hostlist)) - queue.reverse() - self.run_bootstraps(queue) - logging.info("Finished parallel {0}".format("connectivity validation" if self.validate else "bootstrap")) class SharedState: def __init__(self, user, sshPort, sshkey_file, script_dir, boottmpdir, setup_agent_file, ambari_server, cluster_os_type, ambari_version, server_port, - user_run_as, validate = False, password_file = None): + user_run_as, password_file = None): self.hostlist_to_remove_password_file = 
None self.user = user self.sshPort = sshPort @@ -893,7 +859,6 @@ def __init__(self, user, sshPort, sshkey_file, script_dir, boottmpdir, setup_age self.password_file = password_file self.statuses = None self.server_port = server_port - self.validate = validate self.remote_files = {} self.ret = {} pass @@ -923,7 +888,6 @@ def main(argv=None): server_port = onlyargs[9] user_run_as = onlyargs[10] passwordFile = onlyargs[11] - validate = len(onlyargs) == 13 and onlyargs[12].lower() == "true" if not OSCheck.is_windows_family(): # ssh doesn't like open files @@ -936,10 +900,10 @@ def main(argv=None): " using " + scriptDir + " cluster primary OS: " + cluster_os_type + " with user '" + user + "'with ssh Port '" + sshPort + "' sshKey File " + sshkey_file + " password File " + passwordFile +\ " using tmp dir " + bootdir + " ambari: " + ambariServer +"; server_port: " + server_port +\ - "; ambari version: " + ambariVersion+"; validate: " + str(validate) + "; user_run_as: " + user_run_as) + "; ambari version: " + ambariVersion+"; user_run_as: " + user_run_as) sharedState = SharedState(user, sshPort, sshkey_file, scriptDir, bootdir, setupAgentFile, ambariServer, cluster_os_type, ambariVersion, - server_port, user_run_as, validate, passwordFile) + server_port, user_run_as, passwordFile) pbootstrap = PBootstrap(hostList, sharedState) pbootstrap.run() return 0 # Hack to comply with current usage diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql index 7d24563ac2e..c2f83e6755a 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql @@ -18,22 +18,22 @@ ------create tables and grant privileges to db user--------- CREATE TABLE registries ( - id BIGINT NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id BIGINT NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id BIGINT NOT NULL, - mpack_name VARCHAR(255) NOT NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id BIGINT, - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version)); + id BIGINT NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id BIGINT, + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version)); CREATE TABLE stack ( stack_id BIGINT NOT NULL, @@ -44,16 +44,6 @@ CREATE TABLE stack ( CONSTRAINT FK_mpacks FOREIGN KEY (mpack_id) REFERENCES mpacks(id), CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)); -CREATE TABLE mpack_host_state ( - id BIGINT NOT NULL, - host_id BIGINT NOT NULL, - mpack_id BIGINT NOT NULL, - state VARCHAR(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - 
CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE extension( extension_id BIGINT NOT NULL, extension_name VARCHAR(255) NOT NULL, @@ -136,8 +126,7 @@ CREATE TABLE servicegroups ( stack_id BIGINT NOT NULL, CONSTRAINT PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id BIGINT NOT NULL, @@ -254,13 +243,11 @@ CREATE TABLE repo_version ( CREATE TABLE repo_os ( id BIGINT NOT NULL, - repo_version_id BIGINT, - mpack_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, family VARCHAR(255) NOT NULL DEFAULT '', ambari_managed SMALLINT DEFAULT 1, CONSTRAINT PK_repo_os_id PRIMARY KEY (id), - CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id)); + CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)); CREATE TABLE repo_definition ( id BIGINT NOT NULL, @@ -283,7 +270,6 @@ CREATE TABLE repo_tags ( CREATE TABLE servicecomponentdesiredstate ( id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, service_id BIGINT NOT NULL, @@ -300,7 +286,6 @@ CREATE TABLE hostcomponentdesiredstate ( id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, @@ -316,10 +301,8 @@ CREATE TABLE hostcomponentdesiredstate ( CREATE TABLE hostcomponentstate ( id BIGINT NOT NULL, - host_component_desired_state_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN', current_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, @@ -327,9 +310,7 @@ CREATE TABLE hostcomponentstate ( service_id BIGINT NOT NULL, upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE', CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id), - CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id), CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id), CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id)); CREATE INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_id, cluster_id); @@ -893,7 +874,6 @@ CREATE TABLE topology_request ( action VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, bp_name VARCHAR(100) NOT NULL, - raw_request_body CLOB NOT NULL, cluster_properties VARCHAR(3000), cluster_attributes VARCHAR(3000), description VARCHAR(1024), @@ -1012,7 +992,6 @@ CREATE TABLE upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id BIGINT NOT NULL, upgrade_id BIGINT NOT NULL, - lifecycle 
VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR(255) DEFAULT '' NOT NULL, group_title VARCHAR(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group PRIMARY KEY (upgrade_group_id), @@ -1038,18 +1017,22 @@ CREATE TABLE upgrade_history( component_name VARCHAR(255) NOT NULL, from_repo_version_id BIGINT NOT NULL, target_repo_version_id BIGINT NOT NULL, - service_group_id BIGINT NOT NULL, - source_mpack_id BIGINT NOT NULL, - target_mpack_id BIGINT NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name) +); + +CREATE TABLE servicecomponent_version( + id BIGINT NOT NULL, + component_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, + state VARCHAR(32) NOT NULL, + user_name VARCHAR(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1317,8 +1300,6 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) union all select 'host_version_id_seq', 0 FROM SYSIBM.SYSDUMMY1 union all - select 'mpack_host_state_id_seq', 0 FROM SYSIBM.SYSDUMMY1 - union all select 'service_config_id_seq', 1 FROM SYSIBM.SYSDUMMY1 union all select 'upgrade_id_seq', 0 FROM SYSIBM.SYSDUMMY1 @@ -1371,6 +1352,8 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) union all select 'remote_cluster_service_id_seq', 0 FROM SYSIBM.SYSDUMMY1 union all + select 'servicecomponent_version_id_seq', 0 FROM SYSIBM.SYSDUMMY1 + union all select 'hostcomponentdesiredstate_id_seq', 0 FROM SYSIBM.SYSDUMMY1 union all select 'blueprint_service_id_seq', 0 FROM SYSIBM.SYSDUMMY1 diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql index 900e46fa389..c420286d3c5 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql @@ -37,22 +37,22 @@ execute statement; DEALLOCATE PREPARE statement; CREATE TABLE registries ( - id BIGINT NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id BIGINT NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id BIGINT NOT NULL, - mpack_name VARCHAR(255) NOT 
NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id BIGINT, - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); + id BIGINT NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id BIGINT, + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); CREATE TABLE stack ( stack_id BIGINT NOT NULL, @@ -63,16 +63,6 @@ CREATE TABLE stack ( CONSTRAINT FK_mpacks FOREIGN KEY (mpack_id) REFERENCES mpacks(id), CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)); -CREATE TABLE mpack_host_state ( - id BIGINT NOT NULL, - host_id BIGINT NOT NULL, - mpack_id BIGINT NOT NULL, - state VARCHAR(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE extension( extension_id BIGINT NOT NULL, extension_name VARCHAR(100) NOT NULL, @@ -155,8 +145,7 @@ CREATE TABLE servicegroups ( stack_id BIGINT NOT NULL, CONSTRAINT PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id BIGINT NOT NULL, @@ -273,13 +262,11 @@ CREATE TABLE repo_version ( CREATE TABLE repo_os ( id BIGINT NOT NULL, - repo_version_id BIGINT, - mpack_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, family VARCHAR(255) NOT NULL DEFAULT '', ambari_managed TINYINT(1) DEFAULT 1, CONSTRAINT PK_repo_os_id PRIMARY KEY (id), - CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id)); + CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)); CREATE TABLE repo_definition ( id BIGINT NOT NULL, @@ -302,7 +289,6 @@ CREATE TABLE repo_tags ( CREATE TABLE servicecomponentdesiredstate ( id BIGINT NOT NULL, component_name VARCHAR(100) NOT NULL, - component_type VARCHAR(100) NOT NULL, cluster_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, service_id BIGINT NOT NULL, @@ -319,7 +305,6 @@ CREATE TABLE hostcomponentdesiredstate ( id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(100) NOT NULL, - component_type VARCHAR(100) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, @@ -335,10 +320,8 @@ CREATE TABLE hostcomponentdesiredstate ( CREATE TABLE hostcomponentstate ( id BIGINT NOT NULL, - host_component_desired_state_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(100) NOT NULL, - component_type VARCHAR(100) NOT NULL, version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN', current_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, @@ -346,9 +329,7 @@ CREATE TABLE hostcomponentstate ( service_id BIGINT NOT NULL, 
upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE', CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id), - CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id), CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id), CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id)); CREATE INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_id, cluster_id); @@ -910,7 +891,6 @@ CREATE TABLE topology_request ( action VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, bp_name VARCHAR(100) NOT NULL, - raw_request_body LONGTEXT NOT NULL, cluster_properties LONGTEXT, cluster_attributes LONGTEXT, description VARCHAR(1024), @@ -1029,7 +1009,6 @@ CREATE TABLE upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id BIGINT NOT NULL, upgrade_id BIGINT NOT NULL, - lifecycle VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR(255) DEFAULT '' NOT NULL, group_title VARCHAR(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group PRIMARY KEY (upgrade_group_id), @@ -1055,18 +1034,22 @@ CREATE TABLE upgrade_history( component_name VARCHAR(255) NOT NULL, from_repo_version_id BIGINT NOT NULL, target_repo_version_id BIGINT NOT NULL, - service_group_id BIGINT NOT NULL, - source_mpack_id BIGINT NOT NULL, - target_mpack_id BIGINT NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name) +); + +CREATE TABLE servicecomponent_version( + id BIGINT NOT NULL, + component_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, + state VARCHAR(32) NOT NULL, + user_name VARCHAR(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1288,7 +1271,6 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) VALUES ('privilege_id_seq', 1), ('config_id_seq', 1), ('host_version_id_seq', 0), - ('mpack_host_state_id_seq', 0), ('service_config_id_seq', 1), ('alert_definition_id_seq', 0), ('alert_group_id_seq', 0), @@ -1324,6 +1306,7 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) VALUES ('ambari_operation_history_id_seq', 0), ('remote_cluster_id_seq', 0), ('remote_cluster_service_id_seq', 0), + 
('servicecomponent_version_id_seq', 0), ('hostcomponentdesiredstate_id_seq', 0), ('blueprint_service_id_seq', 0), ('blueprint_mpack_instance_id_seq', 0), diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql index 09d53d32a10..3bcba3f3717 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql @@ -18,22 +18,22 @@ ------create tables--------- CREATE TABLE registries ( - id NUMBER(19) NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id NUMBER(19) NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id NUMBER(19) NOT NULL, - mpack_name VARCHAR(255) NOT NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id NUMBER(19), - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); + id NUMBER(19) NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id NUMBER(19), + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); CREATE TABLE stack ( stack_id NUMBER(19) NOT NULL, @@ -44,16 +44,6 @@ CREATE TABLE stack ( CONSTRAINT FK_mpacks FOREIGN KEY (mpack_id) REFERENCES mpacks(id), CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)); -CREATE TABLE mpack_host_state ( - id NUMBER(19) NOT NULL, - host_id NUMBER(19) NOT NULL, - mpack_id NUMBER(19) NOT NULL, - state VARCHAR2(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE extension( extension_id NUMERIC(19) NOT NULL, extension_name VARCHAR2(255) NOT NULL, @@ -136,8 +126,7 @@ CREATE TABLE servicegroups ( stack_id NUMBER(19) NOT NULL, CONSTRAINT PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id NUMBER(19) NOT NULL, @@ -209,7 +198,7 @@ CREATE TABLE serviceconfig ( CONSTRAINT PK_serviceconfig PRIMARY KEY (service_config_id), CONSTRAINT FK_serviceconfig_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id), CONSTRAINT FK_serviceconfig_clstr_svc FOREIGN KEY (service_id, service_group_id, cluster_id) REFERENCES clusterservices (id, service_group_id, cluster_id), - CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_id, version) ); + CONSTRAINT UQ_scv_service_version UNIQUE (cluster_id, service_id, version)); CREATE TABLE serviceconfighosts ( 
  service_config_id NUMBER(19) NOT NULL,
@@ -253,13 +242,11 @@ CREATE TABLE repo_version (
 CREATE TABLE repo_os (
   id NUMBER(19) NOT NULL,
-  repo_version_id NUMBER(19),
-  mpack_id NUMBER(19) NOT NULL,
+  repo_version_id NUMBER(19) NOT NULL,
   family VARCHAR(255) DEFAULT '' NOT NULL,
   ambari_managed NUMBER(1) DEFAULT 1,
   CONSTRAINT PK_repo_os_id PRIMARY KEY (id),
-  CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id),
-  CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id));
+  CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id));
 CREATE TABLE repo_definition (
   id NUMBER(19) NOT NULL,
@@ -282,7 +269,6 @@ CREATE TABLE repo_tags (
 CREATE TABLE servicecomponentdesiredstate (
   id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
-  component_type VARCHAR2(255) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   service_group_id NUMBER(19) NOT NULL,
   service_id NUMBER(19) NOT NULL,
@@ -299,7 +285,6 @@ CREATE TABLE hostcomponentdesiredstate (
   id NUMBER(19) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
-  component_type VARCHAR2(255) NOT NULL,
   desired_state VARCHAR2(255) NOT NULL,
   host_id NUMBER(19) NOT NULL,
   service_group_id NUMBER(19) NOT NULL,
@@ -308,16 +293,14 @@ CREATE TABLE hostcomponentdesiredstate (
   maintenance_state VARCHAR2(32) NOT NULL,
   restart_required NUMBER(1) DEFAULT 0 NOT NULL,
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
-  CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id),
+  CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id));
 CREATE TABLE hostcomponentstate (
   id NUMBER(19) NOT NULL,
-  host_component_desired_state_id NUMBER(19) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
-  component_type VARCHAR2(255) NOT NULL,
   version VARCHAR2(32) DEFAULT 'UNKNOWN' NOT NULL,
   current_state VARCHAR2(255) NOT NULL,
   host_id NUMBER(19) NOT NULL,
@@ -325,9 +308,7 @@ CREATE TABLE hostcomponentstate (
   service_id NUMBER(19) NOT NULL,
   upgrade_state VARCHAR2(32) DEFAULT 'NONE' NOT NULL,
   CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id));
 CREATE INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id);
@@ -888,7 +869,6 @@ CREATE TABLE topology_request (
   action VARCHAR(255) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   bp_name VARCHAR(100) NOT NULL,
-  raw_request_body CLOB NOT NULL,
   cluster_properties CLOB,
   cluster_attributes CLOB,
   description VARCHAR(1024),
@@ -1007,7 +987,6 @@ CREATE TABLE
upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id NUMBER(19) NOT NULL, upgrade_id NUMBER(19) NOT NULL, - lifecycle VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR2(255) DEFAULT '' NOT NULL, group_title VARCHAR2(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group PRIMARY KEY (upgrade_group_id), @@ -1033,18 +1012,22 @@ CREATE TABLE upgrade_history( component_name VARCHAR2(255) NOT NULL, from_repo_version_id NUMBER(19) NOT NULL, target_repo_version_id NUMBER(19) NOT NULL, - service_group_id NUMBER(19) NOT NULL, - source_mpack_id NUMBER(19) NOT NULL, - target_mpack_id NUMBER(19) NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name) +); + +CREATE TABLE servicecomponent_version( + id NUMBER(19) NOT NULL, + component_id NUMBER(19) NOT NULL, + repo_version_id NUMBER(19) NOT NULL, + state VARCHAR2(32) NOT NULL, + user_name VARCHAR2(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1266,7 +1249,6 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('permission_ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('privilege_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('config_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('host_version_id_seq', 0); -INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('mpack_host_state_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('service_config_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_definition_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_group_id_seq', 0); @@ -1302,6 +1284,7 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('blueprint_s INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('ambari_operation_history_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('remote_cluster_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('remote_cluster_service_id_seq', 0); +INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('servicecomponent_version_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('hostcomponentdesiredstate_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('blueprint_service_id_seq', 0); INSERT INTO 
ambari_sequences(sequence_name, sequence_value) values ('blueprint_mpack_instance_id_seq', 0); diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql index 6936a61af1b..b3c50070f03 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql @@ -18,22 +18,22 @@ ------create tables and grant privileges to db user--------- CREATE TABLE registries ( - id BIGINT NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id BIGINT NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id BIGINT NOT NULL, - mpack_name VARCHAR(255) NOT NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id BIGINT, - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version)); + id BIGINT NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id BIGINT, + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version)); CREATE TABLE stack ( stack_id BIGINT NOT NULL, @@ -113,16 +113,6 @@ CREATE TABLE hosts ( CONSTRAINT PK_hosts PRIMARY KEY (host_id), CONSTRAINT UQ_hosts_host_name UNIQUE (host_name)); -CREATE TABLE mpack_host_state ( - id BIGINT NOT NULL, - host_id BIGINT NOT NULL, - mpack_id BIGINT NOT NULL, - state VARCHAR(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE clustersettings ( id BIGINT NOT NULL, setting_name VARCHAR(255) NOT NULL, @@ -138,8 +128,7 @@ CREATE TABLE servicegroups ( stack_id BIGINT NOT NULL, CONSTRAINT PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id BIGINT NOT NULL, @@ -255,13 +244,11 @@ CREATE TABLE repo_version ( CREATE TABLE repo_os ( id BIGINT NOT NULL, - repo_version_id BIGINT, - mpack_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, family VARCHAR(255) NOT NULL DEFAULT '', ambari_managed SMALLINT DEFAULT 1, CONSTRAINT PK_repo_os_id PRIMARY KEY (id), - CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id)); + CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)); CREATE TABLE repo_definition ( id BIGINT NOT 
NULL, @@ -284,7 +271,6 @@ CREATE TABLE repo_tags ( CREATE TABLE servicecomponentdesiredstate ( id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, service_id BIGINT NOT NULL, @@ -301,7 +287,6 @@ CREATE TABLE hostcomponentdesiredstate ( id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, @@ -316,10 +301,8 @@ CREATE TABLE hostcomponentdesiredstate ( CREATE TABLE hostcomponentstate ( id BIGINT NOT NULL, - host_component_desired_state_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN', current_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, @@ -327,9 +310,7 @@ CREATE TABLE hostcomponentstate ( service_id BIGINT NOT NULL, upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE', CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id), - CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id), CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id), CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id)); CREATE INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_id, cluster_id); @@ -894,7 +875,6 @@ CREATE TABLE topology_request ( action VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, bp_name VARCHAR(100) NOT NULL, - raw_request_body TEXT NOT NULL, cluster_properties TEXT, cluster_attributes TEXT, description VARCHAR(1024), @@ -1012,7 +992,6 @@ CREATE TABLE upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id BIGINT NOT NULL, upgrade_id BIGINT NOT NULL, - lifecycle VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR(255) DEFAULT '' NOT NULL, group_title VARCHAR(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group PRIMARY KEY (upgrade_group_id), @@ -1038,18 +1017,22 @@ CREATE TABLE upgrade_history( component_name VARCHAR(255) NOT NULL, from_repo_version_id BIGINT NOT NULL, target_repo_version_id BIGINT NOT NULL, - service_group_id BIGINT NOT NULL, - source_mpack_id BIGINT NOT NULL, - target_mpack_id BIGINT NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, 
service_name) +); + +CREATE TABLE servicecomponent_version( + id BIGINT NOT NULL, + component_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, + state VARCHAR(32) NOT NULL, + user_name VARCHAR(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1277,7 +1260,6 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) VALUES ('config_id_seq', 1), ('repo_version_id_seq', 0), ('host_version_id_seq', 0), - ('mpack_host_state_id_seq', 0), ('service_config_id_seq', 1), ('upgrade_id_seq', 0), ('upgrade_group_id_seq', 0), @@ -1304,6 +1286,7 @@ INSERT INTO ambari_sequences (sequence_name, sequence_value) VALUES ('ambari_operation_history_id_seq', 0), ('remote_cluster_id_seq', 0), ('remote_cluster_service_id_seq', 0), + ('servicecomponent_version_id_seq', 0), ('blueprint_service_id_seq', 0), ('blueprint_mpack_instance_id_seq', 0), ('hostgroup_component_id_seq', 0), diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql index 4b709715c16..e9897b109b8 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql @@ -17,22 +17,22 @@ -- CREATE TABLE registries ( - id NUMERIC(19) NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id NUMERIC(19) NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id NUMERIC(19) NOT NULL, - mpack_name VARCHAR(255) NOT NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id BIGINT, - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); + id NUMERIC(19) NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id BIGINT, + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); CREATE TABLE stack ( stack_id NUMERIC(19) NOT NULL, @@ -43,16 +43,6 @@ CREATE TABLE stack ( CONSTRAINT FK_mpacks FOREIGN KEY (mpack_id) REFERENCES mpacks(id), CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)); -CREATE TABLE mpack_host_state ( - id NUMERIC(19) NOT NULL, - host_id NUMERIC(19) NOT NULL, - mpack_id NUMERIC(19) NOT NULL, - state VARCHAR(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE extension( extension_id NUMERIC(19) NOT NULL, extension_name VARCHAR(255) NOT NULL, @@ -135,8 +125,7 @@ CREATE TABLE servicegroups ( stack_id NUMERIC(19) NOT NULL, CONSTRAINT 
PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id NUMBER(19) NOT NULL, @@ -252,13 +241,11 @@ CREATE TABLE repo_version ( CREATE TABLE repo_os ( id NUMERIC(19) NOT NULL, - repo_version_id NUMERIC(19), - mpack_id NUMERIC(19) NOT NULL, + repo_version_id NUMERIC(19) NOT NULL, family VARCHAR(255) NOT NULL DEFAULT '', ambari_managed SMALLINT DEFAULT 1, CONSTRAINT PK_repo_os_id PRIMARY KEY (id), - CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id)); + CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)); CREATE TABLE repo_definition ( id NUMERIC(19) NOT NULL, @@ -281,7 +268,6 @@ CREATE TABLE repo_tags ( CREATE TABLE servicecomponentdesiredstate ( id NUMERIC(19) NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, cluster_id NUMERIC(19) NOT NULL, service_group_id NUMERIC(19) NOT NULL, service_id NUMERIC(19) NOT NULL, @@ -298,7 +284,6 @@ CREATE TABLE hostcomponentdesiredstate ( id NUMERIC(19) NOT NULL, cluster_id NUMERIC(19) NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_id NUMERIC(19) NOT NULL, service_group_id BIGINT NOT NULL, @@ -313,10 +298,8 @@ CREATE TABLE hostcomponentdesiredstate ( CREATE TABLE hostcomponentstate ( id NUMERIC(19) NOT NULL, - host_component_desired_state_id NUMERIC(19) NOT NULL, cluster_id NUMERIC(19) NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN', current_state VARCHAR(255) NOT NULL, host_id NUMERIC(19) NOT NULL, @@ -324,9 +307,7 @@ CREATE TABLE hostcomponentstate ( service_id BIGINT NOT NULL, upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE', CONSTRAINT PK_hostcomponentstate PRIMARY KEY (id), - CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id), CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id), CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id)); CREATE INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id); @@ -888,7 +869,6 @@ CREATE TABLE topology_request ( action VARCHAR(255) NOT NULL, cluster_id NUMERIC(19) NOT NULL, bp_name VARCHAR(100) NOT NULL, - raw_request_body TEXT NOT NULL, cluster_properties TEXT, cluster_attributes TEXT, description VARCHAR(1024), @@ -1007,7 +987,6 @@ CREATE TABLE upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id NUMERIC(19) NOT NULL, upgrade_id NUMERIC(19) NOT NULL, - lifecycle VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR(255) DEFAULT '' NOT NULL, group_title VARCHAR(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group 
PRIMARY KEY (upgrade_group_id), @@ -1027,24 +1006,28 @@ CREATE TABLE upgrade_item ( ); CREATE TABLE upgrade_history( - id NUMERIC(19) NOT NULL, - upgrade_id NUMERIC(19) NOT NULL, + id BIGINT NOT NULL, + upgrade_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, component_name VARCHAR(255) NOT NULL, - from_repo_version_id NUMERIC(19) NOT NULL, - target_repo_version_id NUMERIC(19) NOT NULL, - service_group_id NUMERIC(19) NOT NULL, - source_mpack_id NUMERIC(19) NOT NULL, - target_mpack_id NUMERIC(19) NOT NULL, + from_repo_version_id BIGINT NOT NULL, + target_repo_version_id BIGINT NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name) +); + +CREATE TABLE servicecomponent_version( + id NUMERIC(19) NOT NULL, + component_id NUMERIC(19) NOT NULL, + repo_version_id NUMERIC(19) NOT NULL, + state VARCHAR(32) NOT NULL, + user_name VARCHAR(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1266,7 +1249,6 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('permission_ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('privilege_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('config_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('host_version_id_seq', 0); -INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('mpack_host_state_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('service_config_id_seq', 1); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_definition_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('alert_group_id_seq', 0); @@ -1302,6 +1284,7 @@ INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('blueprint_s INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('ambari_operation_history_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('remote_cluster_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('remote_cluster_service_id_seq', 0); +INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('servicecomponent_version_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('hostcomponentdesiredstate_id_seq', 0); INSERT INTO ambari_sequences(sequence_name, sequence_value) values ('blueprint_service_id_seq', 0); INSERT INTO 
ambari_sequences(sequence_name, sequence_value) values ('blueprint_mpack_instance_id_seq', 0); diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql index 97894481e93..036ebc8a9c4 100644 --- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql +++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql @@ -30,22 +30,22 @@ sqlcmd -S localhost\SQLEXPRESS -i C:\app\ambari-server-1.3.0-SNAPSHOT\resources\ ------create tables and grant privileges to db user--------- CREATE TABLE registries ( - id BIGINT NOT NULL, - registry_name VARCHAR(255) NOT NULL, - registry_type VARCHAR(255) NOT NULL, - registry_uri VARCHAR(255) NOT NULL, - CONSTRAINT PK_registries PRIMARY KEY (id), - CONSTRAINT UQ_registry_name UNIQUE (registry_name)); + id BIGINT NOT NULL, + registry_name VARCHAR(255) NOT NULL, + registry_type VARCHAR(255) NOT NULL, + registry_uri VARCHAR(255) NOT NULL, + CONSTRAINT PK_registries PRIMARY KEY (id), + CONSTRAINT UQ_registry_name UNIQUE (registry_name)); CREATE TABLE mpacks ( - id BIGINT NOT NULL, - mpack_name VARCHAR(255) NOT NULL, - mpack_version VARCHAR(255) NOT NULL, - mpack_uri VARCHAR(255), - registry_id BIGINT, - CONSTRAINT PK_mpacks PRIMARY KEY (id), - CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), - CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); + id BIGINT NOT NULL, + mpack_name VARCHAR(255) NOT NULL, + mpack_version VARCHAR(255) NOT NULL, + mpack_uri VARCHAR(255), + registry_id BIGINT, + CONSTRAINT PK_mpacks PRIMARY KEY (id), + CONSTRAINT UQ_mpack_name_version UNIQUE(mpack_name, mpack_version), + CONSTRAINT FK_registries FOREIGN KEY (registry_id) REFERENCES registries(id)); CREATE TABLE stack ( stack_id BIGINT NOT NULL, @@ -56,16 +56,6 @@ CREATE TABLE stack ( CONSTRAINT FK_mpacks FOREIGN KEY (mpack_id) REFERENCES mpacks(id), CONSTRAINT UQ_stack UNIQUE (stack_name, stack_version)); -CREATE TABLE mpack_host_state ( - id BIGINT NOT NULL, - host_id BIGINT NOT NULL, - mpack_id BIGINT NOT NULL, - state VARCHAR(32) NOT NULL, - CONSTRAINT PK_mpack_host_state PRIMARY KEY (id), - CONSTRAINT FK_mhs_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_mhs_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id), - CONSTRAINT UQ_mpack_host_state UNIQUE(host_id, mpack_id)); - CREATE TABLE extension( extension_id BIGINT NOT NULL, extension_name VARCHAR(255) NOT NULL, @@ -150,8 +140,7 @@ CREATE TABLE servicegroups ( stack_id BIGINT NOT NULL, CONSTRAINT PK_servicegroups PRIMARY KEY (id, cluster_id), CONSTRAINT FK_servicegroups_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters (cluster_id), - CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id), - CONSTRAINT UQ_TEMP_UNTIL_REAL_PK UNIQUE(id)); + CONSTRAINT FK_servicegroups_stack_id FOREIGN KEY (stack_id) REFERENCES stack (stack_id)); CREATE TABLE servicegroupdependencies ( id BIGINT NOT NULL, @@ -267,13 +256,11 @@ CREATE TABLE repo_version ( CREATE TABLE repo_os ( id BIGINT NOT NULL, - repo_version_id BIGINT, - mpack_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, family VARCHAR(255) NOT NULL DEFAULT '', ambari_managed BIT DEFAULT 1, CONSTRAINT PK_repo_os_id PRIMARY KEY (id), - CONSTRAINT FK_repo_os_id_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT FK_repo_os_mpack_id FOREIGN KEY (mpack_id) REFERENCES mpacks (id)); + CONSTRAINT FK_repo_os_id_repo_version_id 
FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id)); CREATE TABLE repo_definition ( id BIGINT NOT NULL, @@ -296,7 +283,6 @@ CREATE TABLE repo_tags ( CREATE TABLE servicecomponentdesiredstate ( id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, service_id BIGINT NOT NULL, @@ -313,7 +299,6 @@ CREATE TABLE hostcomponentdesiredstate ( id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, service_group_id BIGINT NOT NULL, @@ -328,10 +313,8 @@ CREATE TABLE hostcomponentdesiredstate ( CREATE TABLE hostcomponentstate ( id BIGINT NOT NULL, - host_component_desired_state_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, - component_type VARCHAR(255) NOT NULL, version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN', current_state VARCHAR(255) NOT NULL, host_id BIGINT NOT NULL, @@ -339,9 +322,7 @@ CREATE TABLE hostcomponentstate ( service_id BIGINT NOT NULL, upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE', CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id), - CONSTRAINT UQ_hostcomponentstate_name UNIQUE (component_name, service_id, host_id, service_group_id, cluster_id), CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id), - CONSTRAINT FK_hostcomponentstate_ds_id FOREIGN KEY (host_component_desired_state_id) REFERENCES hostcomponentdesiredstate (id), CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_id, service_group_id, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_id, service_group_id, cluster_id)); CREATE NONCLUSTERED INDEX idx_host_component_state on hostcomponentstate(host_id, component_name, service_name, cluster_id); @@ -911,7 +892,6 @@ CREATE TABLE topology_request ( action VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, bp_name VARCHAR(100) NOT NULL, - raw_request_body TEXT NOT NULL, cluster_properties TEXT, cluster_attributes TEXT, description VARCHAR(1024), @@ -1030,7 +1010,6 @@ CREATE TABLE upgrade ( CREATE TABLE upgrade_group ( upgrade_group_id BIGINT NOT NULL, upgrade_id BIGINT NOT NULL, - lifecycle VARCHAR(64) DEFAULT 'UPGRADE' NOT NULL, group_name VARCHAR(255) DEFAULT '' NOT NULL, group_title VARCHAR(1024) DEFAULT '' NOT NULL, CONSTRAINT PK_upgrade_group PRIMARY KEY CLUSTERED (upgrade_group_id), @@ -1056,18 +1035,22 @@ CREATE TABLE upgrade_history( component_name VARCHAR(255) NOT NULL, from_repo_version_id BIGINT NOT NULL, target_repo_version_id BIGINT NOT NULL, - service_group_id BIGINT NOT NULL, - source_mpack_id BIGINT NOT NULL, - target_mpack_id BIGINT NOT NULL, CONSTRAINT PK_upgrade_hist PRIMARY KEY (id), CONSTRAINT FK_upgrade_hist_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id), CONSTRAINT FK_upgrade_hist_from_repo FOREIGN KEY (from_repo_version_id) REFERENCES repo_version (repo_version_id), CONSTRAINT FK_upgrade_hist_target_repo FOREIGN KEY (target_repo_version_id) REFERENCES repo_version (repo_version_id), - CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name), - CONSTRAINT FK_upgrade_hist_svc_grp_id FOREIGN KEY (service_group_id) REFERENCES servicegroups (id), - CONSTRAINT FK_upgrade_hist_src_mpack_id FOREIGN KEY (source_mpack_id) REFERENCES mpacks (id), - CONSTRAINT FK_upgrade_hist_tgt_mpack_id FOREIGN KEY (target_mpack_id) REFERENCES 
mpacks (id), - CONSTRAINT UQ_upgrade_hist_srvc_grp UNIQUE (upgrade_id, service_group_id) + CONSTRAINT UQ_upgrade_hist UNIQUE (upgrade_id, component_name, service_name) +); + +CREATE TABLE servicecomponent_version( + id BIGINT NOT NULL, + component_id BIGINT NOT NULL, + repo_version_id BIGINT NOT NULL, + state VARCHAR(32) NOT NULL, + user_name VARCHAR(255) NOT NULL, + CONSTRAINT PK_sc_version PRIMARY KEY (id), + CONSTRAINT FK_scv_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id), + CONSTRAINT FK_scv_repo_version_id FOREIGN KEY (repo_version_id) REFERENCES repo_version (repo_version_id) ); CREATE TABLE ambari_operation_history( @@ -1303,7 +1286,6 @@ BEGIN TRANSACTION ('repo_os_id_seq', 0), ('repo_definition_id_seq', 0), ('host_version_id_seq', 0), - ('mpack_host_state_id_seq', 0), ('service_config_id_seq', 1), ('upgrade_id_seq', 0), ('upgrade_group_id_seq', 0), @@ -1330,6 +1312,7 @@ BEGIN TRANSACTION ('ambari_operation_history_id_seq', 0), ('remote_cluster_id_seq', 0), ('remote_cluster_service_id_seq', 0), + ('servicecomponent_version_id_seq', 0), ('hostcomponentdesiredstate_id_seq', 0), ('blueprint_service_id_seq', 0), ('blueprint_mpack_instance_id_seq', 0), diff --git a/ambari-server/src/main/resources/META-INF/persistence.xml b/ambari-server/src/main/resources/META-INF/persistence.xml index 79f051e2f7c..dbe3713899e 100644 --- a/ambari-server/src/main/resources/META-INF/persistence.xml +++ b/ambari-server/src/main/resources/META-INF/persistence.xml @@ -59,7 +59,6 @@ org.apache.ambari.server.orm.entities.KeyValueEntity org.apache.ambari.server.orm.entities.MemberEntity org.apache.ambari.server.orm.entities.MetainfoEntity - org.apache.ambari.server.orm.entities.MpackHostStateEntity org.apache.ambari.server.orm.entities.PermissionEntity org.apache.ambari.server.orm.entities.RoleAuthorizationEntity org.apache.ambari.server.orm.entities.PrincipalEntity @@ -77,6 +76,7 @@ org.apache.ambari.server.orm.entities.ResourceTypeEntity org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity + org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity org.apache.ambari.server.orm.entities.ServiceConfigEntity org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity org.apache.ambari.server.orm.entities.StackEntity diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py index ba18d8d3757..cd172afb1d0 100644 --- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py +++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py @@ -114,7 +114,7 @@ def actionexecute(self, env): # Build structured output with initial values self.structured_output = { 'package_installation_result': 'FAIL', - 'mpack_id': command_repository.mpack_id + 'repository_version_id': command_repository.version_id } self.put_structured_out(self.structured_output) diff --git a/ambari-server/src/main/resources/key_properties.json b/ambari-server/src/main/resources/key_properties.json index c45136cb30e..6e2463bdef6 100644 --- a/ambari-server/src/main/resources/key_properties.json +++ b/ambari-server/src/main/resources/key_properties.json @@ -11,7 +11,7 @@ "ServiceGroup": "HostRoles/service_group_name", "Host": "HostRoles/host_name", "Service": "HostRoles/service_name", - "HostComponent": "HostRoles/id", + "HostComponent": "HostRoles/component_name", "Component": 
"HostRoles/component_name" }, "Action": { diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py index a2c35966ea9..09ed2e118de 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py +++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py @@ -63,8 +63,7 @@ ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] stack_version_unformatted = config['hostLevelParams']['stack_version'] -stack_version_formatted = config['hostLevelParams']['stack_version'] -#stack_version_formatted = format_stack_version(stack_version_unformatted) +stack_version_formatted = format_stack_version(stack_version_unformatted) upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", "")) version = default("/commandParams/version", None) @@ -110,10 +109,7 @@ def is_secure_port(port): # force the use of "current" in the hook hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000") hadoop_home = stack_select.get_hadoop_dir("home") -stack_name = default("/hostLevelParams/stack_name", None) -stack_name = stack_name.lower() -component_directory = "namenode" -hadoop_libexec_dir = format("/usr/hwx/mpacks/{stack_name}/{stack_version_formatted}/{component_directory}/libexec") +hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") hadoop_lib_home = stack_select.get_hadoop_dir("lib") hadoop_dir = "/etc/hadoop" diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py index b63c1ff2a7b..27679e0d5c2 100644 --- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py +++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py @@ -189,9 +189,6 @@ def setup_hadoop_env(): # create /etc/hadoop Directory(params.hadoop_dir, mode=0755) - #Write out the conf directory - #TODO: Change with instance manager - Directory(params.hadoop_conf_dir, mode=0755) # write out hadoop-env.sh, but only if the directory exists if os.path.exists(params.hadoop_conf_dir): File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner, diff --git a/ambari-server/src/main/resources/upgrade-pack.xsd b/ambari-server/src/main/resources/upgrade-pack.xsd index b406cfa0402..58f26f6117e 100644 --- a/ambari-server/src/main/resources/upgrade-pack.xsd +++ b/ambari-server/src/main/resources/upgrade-pack.xsd @@ -111,7 +111,8 @@ - + + @@ -123,12 +124,28 @@ + - + + + + + + + + + + + + + + + + @@ -159,10 +176,10 @@ - + - + @@ -425,7 +442,15 @@ - + + + + Ensures that the element "processing" does not have duplicate services + + + + + @@ -455,4 +480,7 @@ + + + diff --git a/ambari-server/src/test/java/org/apache/ambari/server/StateRecoveryManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/StateRecoveryManagerTest.java index 4b928850f7a..8b94338396c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/StateRecoveryManagerTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/StateRecoveryManagerTest.java @@ -111,7 +111,7 @@ public void testCheckHostAndClusterVersions() throws Exception { replay(hostVersionDAOMock, serviceComponentDesiredStateDAOMock); - stateRecoveryManager.doWork(); + stateRecoveryManager.checkHostAndClusterVersions(); // Checking that only invalid host 
version states have been changed assertFalse(installFailedHostVersionCapture.hasCaptured()); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java index 3fd5ce11375..50653eb9888 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java @@ -116,7 +116,7 @@ public static void setup() throws AmbariException { OrmTestHelper helper = injector.getInstance(OrmTestHelper.class); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster1); - ServiceGroup serviceGroup = cluster1.addServiceGroup("CORE", cluster1.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster1.addServiceGroup("CORE", "HDP-2.6.0"); cluster1.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); SERVICE_SITE_CLUSTER = new HashMap<>(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java index 6bc2c93bf4d..3d9a013d561 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java @@ -80,14 +80,8 @@ import org.apache.ambari.server.state.scheduler.RequestExecutionImpl; import org.apache.ambari.server.state.stack.OsFamily; import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl; -import org.apache.ambari.server.topology.ComponentResolver; -import org.apache.ambari.server.topology.DefaultStackFactory; import org.apache.ambari.server.topology.PersistedState; -import org.apache.ambari.server.topology.StackComponentResolver; -import org.apache.ambari.server.topology.StackFactory; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.BasicBlueprintValidator; -import org.apache.ambari.server.topology.validators.BlueprintValidator; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.easymock.EasyMock; @@ -341,9 +335,6 @@ protected void configure() { bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class)); bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class); bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class)); - bind(ComponentResolver.class).to(StackComponentResolver.class); - bind(BlueprintValidator.class).to(BasicBlueprintValidator.class); - bind(StackFactory.class).to(DefaultStackFactory.class); } private void installDependencies() { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java index e5a7fbd5bbb..a23fae966a4 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java @@ -155,11 +155,11 @@ public void teardown() throws AmbariException, SQLException { public void testHeartbeatWithConfigs() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - 
hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -223,7 +223,7 @@ public void testHeartbeatWithConfigs() throws Exception { public void testRestartRequiredAfterInstallClient() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT); + hdfs.addServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -287,11 +287,11 @@ public void testRestartRequiredAfterInstallClient() throws Exception { public void testHeartbeatCustomCommandWithConfigs() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -370,11 +370,11 @@ public void testHeartbeatCustomCommandWithConfigs() throws Exception { public void testHeartbeatCustomStartStop() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -453,11 +453,11 @@ public void testHeartbeatCustomStartStop() throws Exception { public void testStatusHeartbeat() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -574,7 +574,7 @@ public void testCommandReportOnHeartbeatUpdatedState() throws Exception { 
Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -692,7 +692,7 @@ public void testCommandReportOnHeartbeatUpdatedState() public void testUpgradeSpecificHandling() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -785,7 +785,7 @@ public void testUpgradeSpecificHandling() throws Exception { public void testCommandStatusProcesses() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); @@ -863,11 +863,11 @@ public void testCommandStatusProcesses() throws Exception { public void testComponentUpgradeFailReport() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT); + hdfs.addServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS). @@ -974,11 +974,11 @@ public void testComponentUpgradeFailReport() throws Exception { public void testComponentUpgradeInProgressReport() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT); + hdfs.addServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS). @@ -1230,10 +1230,10 @@ public void testInstallPackagesWithId() throws Exception { public void testComponentInProgressStatusSafeAfterStatusReport() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE). addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE). 
addServiceComponentHost(DummyHostname1); @@ -1311,7 +1311,7 @@ public void testComponentInProgressStatusSafeAfterStatusReport() throws Exceptio */ private Service addService(Cluster cluster, String serviceName) throws AmbariException { RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0"); return cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java index afbb3706b21..2a7b8b2784b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java @@ -186,9 +186,9 @@ public void testHeartbeat() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); - hdfs.addServiceComponent(NAMENODE, NAMENODE); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(DATANODE); + hdfs.addServiceComponent(NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); Collection hosts = cluster.getHosts(); assertEquals(hosts.size(), 1); @@ -237,9 +237,9 @@ public void testHeartbeat() throws Exception { public void testStatusHeartbeatWithAnnotation() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); - hdfs.addServiceComponent(NAMENODE, NAMENODE); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(DATANODE); + hdfs.addServiceComponent(NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); ActionQueue aq = new ActionQueue(); @@ -287,10 +287,10 @@ public void testStatusHeartbeatWithAnnotation() throws Exception { public void testLiveStatusUpdateAfterStopFailed() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE). addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, DATANODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE). 
addServiceComponentHost(DummyHostname1); @@ -391,15 +391,15 @@ public void testRegistrationRecoveryConfig() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true); hdfs.getServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT); + hdfs.addServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); // Create helper after creating service to avoid race condition caused by asynchronous recovery configs @@ -467,15 +467,15 @@ public void testRegistrationRecoveryConfigMaintenanceMode() /* * Add three service components enabled for auto start. */ - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true); hdfs.getServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT).setRecoveryEnabled(true); + hdfs.addServiceComponent(HDFS_CLIENT).setRecoveryEnabled(true); hdfs.getServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); @@ -796,11 +796,11 @@ public void testStateCommandsAtRegistration() throws Exception, InvalidStateTran public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -852,11 +852,11 @@ public void testTaskInProgressHandling() throws Exception, InvalidStateTransitio public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(SECONDARY_NAMENODE, SECONDARY_NAMENODE); + hdfs.addServiceComponent(SECONDARY_NAMENODE); 
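// Context for the recurring one-line change in these test hunks: this patch drops
// component_type from servicecomponentdesiredstate, hostcomponentdesiredstate and
// hostcomponentstate (see the DDL hunks earlier), so Service.addServiceComponent now
// takes only the component name. A minimal sketch of the resulting wiring idiom,
// built from the same calls the tests use; the helper name wireComponent is
// illustrative only, not part of the patch:
private void wireComponent(Service service, String componentName, String hostname) throws AmbariException {
  // create the component on the service (no separate component type argument any more)
  service.addServiceComponent(componentName);
  // then map it onto a host, exactly as each test does right after creating it
  service.getServiceComponent(componentName).addServiceComponentHost(hostname);
}
// usage, mirroring the tests: wireComponent(hdfs, DATANODE, DummyHostname1);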
hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1); ActionQueue aq = new ActionQueue(); @@ -922,11 +922,11 @@ public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTran public void testStatusHeartbeat() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(HDFS_CLIENT, HDFS_CLIENT); + hdfs.addServiceComponent(HDFS_CLIENT); hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1); ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS). @@ -982,9 +982,9 @@ public void testRecoveryStatusReports() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Host hostObject = clusters.getHost(DummyHostname1); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); @@ -1062,9 +1062,9 @@ public void testProcessStatusReports() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Host hostObject = clusters.getHost(DummyHostname1); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE); + hdfs.addServiceComponent(NAMENODE); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); hdfs.getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); @@ -1388,7 +1388,7 @@ private ComponentStatus createComponentStatus(String clusterName, String service public void testCommandStatusProcesses_empty() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); Service hdfs = addService(cluster, HDFS); - hdfs.addServiceComponent(DATANODE, DATANODE); + hdfs.addServiceComponent(DATANODE); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED); @@ -1604,7 +1604,7 @@ private File createTestKeytabData(HeartBeatHandler heartbeatHandler) throws Exce */ private Service addService(Cluster cluster, String serviceName) throws AmbariException { RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", DummyStackId); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0"); return cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); } diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java index e4b8b2d2ea8..5c96ece24ef 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java @@ -176,11 +176,11 @@ public void testStateCommandsGeneration() throws AmbariException, InterruptedExc clusters.mapAndPublishHostsToCluster(hostNames, clusterName); ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name(), Role.SECONDARY_NAMENODE.name()); + hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()); hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1); hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED); @@ -279,16 +279,16 @@ public void testStatusCommandForAnyComponents() throws Exception { clusters.mapAndPublishHostsToCluster(hostNames, clusterName); ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost (hostname1); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost (hostname1); - hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name(), Role.SECONDARY_NAMENODE.name()); + hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()); hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()). 
addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost (hostname1); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost @@ -387,11 +387,11 @@ public void testHeartbeatStateCommandsEnqueueing() throws AmbariException, Inter ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name(), Role.SECONDARY_NAMENODE.name()); + hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()); hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1); hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED); @@ -469,13 +469,13 @@ public void testHeartbeatLossWithComponent() throws AmbariException, Interrupted ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name(), Role.SECONDARY_NAMENODE.name()); + hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()); hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(hostname1); ActionQueue aq = new ActionQueue(); @@ -589,11 +589,11 @@ public void testStateCommandsWithAlertsGeneration() throws AmbariException, Inte ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1); - hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name(), Role.SECONDARY_NAMENODE.name()); + hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()); hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1); 
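// The TestHeartbeatMonitor hunks apply the same single-argument form with Role enum
// names. A condensed, illustrative sketch (the loop is not in the patch; the calls are):
for (Role role : java.util.Arrays.asList(Role.DATANODE, Role.NAMENODE, Role.SECONDARY_NAMENODE)) {
  hdfs.addServiceComponent(role.name());
  hdfs.getServiceComponent(role.name()).addServiceComponentHost(hostname1);
}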
hdfs.getServiceComponent(Role.DATANODE.name()).getServiceComponentHost(hostname1).setState(State.INSTALLED); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java index fd803c7e864..3a05e4800c5 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java @@ -263,7 +263,7 @@ public void testExecute_subResourcePredicate() throws Exception { TreeNode opSystemNode = opSystemsNode.getChild("OperatingSystem:1"); Assert.assertEquals("OperatingSystem:1", opSystemNode.getName()); Resource osResource = opSystemNode.getObject(); - Assert.assertEquals(Resource.Type.OperatingSystemReadOnly, opSystemNode.getObject().getType()); + Assert.assertEquals(Resource.Type.OperatingSystem, opSystemNode.getObject().getType()); Assert.assertEquals("centos5", osResource.getPropertyValue("OperatingSystems/os_type")); } @@ -470,7 +470,7 @@ public void testExecute__Stack_instance_specifiedSubResources() throws Exception TreeNode opSystemNode = opSystemsNode.getChild("OperatingSystem:1"); Assert.assertEquals("OperatingSystem:1", opSystemNode.getName()); - Assert.assertEquals(Resource.Type.OperatingSystemReadOnly, opSystemNode.getObject().getType()); + Assert.assertEquals(Resource.Type.OperatingSystem, opSystemNode.getObject().getType()); Assert.assertEquals(1, opSystemNode.getChildren().size()); TreeNode repositoriesNode = opSystemNode.getChild("repositories"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java index 0a95ee673b6..7a1ce8b2175 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java @@ -19,8 +19,10 @@ package org.apache.ambari.server.api.query.render; import static java.util.stream.Collectors.toList; +import static org.easymock.EasyMock.anyLong; import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.createNiceMock; +import static org.easymock.EasyMock.createStrictMock; import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.reset; @@ -61,14 +63,10 @@ import org.apache.ambari.server.controller.internal.ResourceImpl; import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.controller.spi.ClusterController; -import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; -import org.apache.ambari.server.controller.spi.NoSuchResourceException; import org.apache.ambari.server.controller.spi.Predicate; import org.apache.ambari.server.controller.spi.Request; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.spi.ResourceProvider; -import org.apache.ambari.server.controller.spi.SystemException; -import org.apache.ambari.server.controller.spi.UnsupportedPropertyException; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.DesiredConfig; @@ -104,7 +102,6 @@ public class ClusterBlueprintRendererTest { private static final ClusterTopology topology = 
createNiceMock(ClusterTopology.class); - private static final ClusterTopology topologyWithKerberos = createNiceMock(ClusterTopology.class); private static final ClusterController clusterController = createNiceMock(ClusterControllerImpl.class); private static final AmbariContext ambariContext = createNiceMock(AmbariContext.class); @@ -127,14 +124,9 @@ public class ClusterBlueprintRendererTest { private static final Configuration clusterConfig = new Configuration(clusterProps, clusterAttributes); public static final StackId STACK_ID = new StackId("HDP", "1.3.3"); - private static final ResourceProvider artifactResourceProvider = createNiceMock(ResourceProvider.class); - private static final Resource artifactResource = createNiceMock(Resource.class); @Before public void setup() throws Exception { - PowerMock.mockStatic(AmbariContext.class); - expect(AmbariContext.getClusterController()).andReturn(clusterController).anyTimes(); - expect(AmbariContext.getController()).andReturn(controller).anyTimes(); Map clusterTypeProps = new HashMap<>(); clusterProps.put("test-type-one", clusterTypeProps); @@ -171,10 +163,11 @@ public void setup() throws Exception { groupInfoMap.put("host_group_1", group1Info); groupInfoMap.put("host_group_2", group2Info); - setupTopology(topology, groupInfoMap); - expect(topology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); - setupTopology(topologyWithKerberos, groupInfoMap); - expect(topologyWithKerberos.isClusterKerberosEnabled()).andReturn(true).anyTimes(); + expect(topology.isNameNodeHAEnabled()).andReturn(false).anyTimes(); + expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes(); + expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); + expect(topology.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(blueprint.getHostGroups()).andReturn(hostGroups).anyTimes(); expect(blueprint.getHostGroup("host_group_1")).andReturn(group1).anyTimes(); @@ -189,6 +182,11 @@ public void setup() throws Exception { expect(group1.getComponents()).andReturn(group1Components).anyTimes(); expect(group2.getComponents()).andReturn(group2Components).anyTimes(); + expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes(); + expect(topology.getClusterId()).andReturn(1L).anyTimes(); + PowerMock.mockStatic(AmbariServer.class); + expect(AmbariServer.getController()).andReturn(controller).anyTimes(); + PowerMock.replay(AmbariServer.class); expect(clusters.getCluster("clusterName")).andReturn(cluster).anyTimes(); expect(controller.getKerberosHelper()).andReturn(kerberosHelper).anyTimes(); expect(controller.getClusters()).andReturn(clusters).anyTimes(); @@ -197,40 +195,64 @@ public void setup() throws Exception { properties.add("core-site/hadoop.security.auth_to_local"); expect(kerberosDescriptor.getAllAuthToLocalProperties()).andReturn(properties).anyTimes(); expect(ambariContext.getClusterName(1L)).andReturn("clusterName").anyTimes(); + replay(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor); + } - setupKerberosDescriptorArtifact(); + private void setupMocksForKerberosEnabledCluster() throws Exception { - PowerMock.replay(AmbariContext.class); - replay(topology, topologyWithKerberos, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor, clusterController, 
artifactResource, artifactResourceProvider); - } + AmbariContext ambariContext = createNiceMock(AmbariContext.class); + expect(ambariContext.getClusterName(anyLong())).andReturn("clusterName").anyTimes(); - @After - public void tearDown() { - verify(topology, topologyWithKerberos, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor, clusterController, artifactResource, artifactResourceProvider); - reset(topology, topologyWithKerberos, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor, clusterController, artifactResource, artifactResourceProvider); - PowerMock.reset(AmbariContext.class); - } + PowerMock.mockStatic(AmbariContext.class); + expect(AmbariContext.getClusterController()).andReturn(clusterController).anyTimes(); + expect(AmbariContext.getController()).andReturn(controller).anyTimes(); + + reset(topology); + + HostGroupInfo group1Info = new HostGroupInfo("host_group_1"); + group1Info.addHost("host1"); + group1Info.setConfiguration(emptyConfiguration); + HostGroupInfo group2Info = new HostGroupInfo("host_group_2"); + Map groupInfoMap = new HashMap<>(); + group2Info.addHosts(Arrays.asList("host2", "host3")); + group2Info.setConfiguration(emptyConfiguration); + groupInfoMap.put("host_group_1", group1Info); + groupInfoMap.put("host_group_2", group2Info); + + expect(topology.isNameNodeHAEnabled()).andReturn(false).anyTimes(); + expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes(); + expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); + expect(topology.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes(); + expect(topology.getClusterId()).andReturn(new Long(1)).anyTimes(); + expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes(); + expect(topology.isClusterKerberosEnabled()).andReturn(true).anyTimes(); + + ResourceProvider resourceProvider = createStrictMock(ResourceProvider.class); + expect(clusterController.ensureResourceProvider(Resource.Type.Artifact)).andReturn(resourceProvider).once(); + + Resource resource = createStrictMock(Resource.class); + Set result = Collections.singleton(resource); + + expect(resourceProvider.getResources((Request) anyObject(Request.class), + (Predicate) anyObject(Predicate.class))).andReturn(result).once(); - private void setupKerberosDescriptorArtifact() throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { Map> resourcePropertiesMap = new HashMap<>(); resourcePropertiesMap.put(ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY, Collections.emptyMap()); Map propertiesMap = new HashMap<>(); propertiesMap.put("testProperty", "testValue"); resourcePropertiesMap.put(ArtifactResourceProvider.ARTIFACT_DATA_PROPERTY + "/properties", propertiesMap); - expect(clusterController.ensureResourceProvider(Resource.Type.Artifact)).andReturn(artifactResourceProvider).anyTimes(); - expect(artifactResourceProvider.getResources(anyObject(Request.class), anyObject(Predicate.class))).andReturn(Collections.singleton(artifactResource)).anyTimes(); - expect(artifactResource.getPropertiesMap()).andReturn(resourcePropertiesMap).anyTimes(); + expect(resource.getPropertiesMap()).andReturn(resourcePropertiesMap).once(); + + PowerMock.replay(AmbariContext.class); + replay(ambariContext, topology, clusterController, resource, resourceProvider); } - private void setupTopology(ClusterTopology topology, Map groupInfoMap) { - 
expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes(); - expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(topology.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes(); - expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes(); - expect(topology.getClusterId()).andReturn(1L).anyTimes(); + + @After + public void tearDown() { + verify(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor); + reset(topology, blueprint, stack, group1, group2, ambariContext, clusters, controller, kerberosHelper, cluster, kerberosDescriptor); } @Test @@ -266,23 +288,7 @@ public TreeNode createResultTreeSettingsObject(TreeNode resu TreeNode clusterTree = resultTree.addChild(clusterResource, "Cluster:1"); - // Add global recovery_enabled as cluster setting - TreeNode settingsNode = clusterTree.addChild(null, "settings"); - Resource clusterSettingResource = new ResourceImpl(Resource.Type.ClusterSetting); - clusterSettingResource.setProperty("ClusterSettingInfo/cluster_setting_name", "recovery_enabled"); - clusterSettingResource.setProperty("ClusterSettingInfo/cluster_setting_value", "true"); - settingsNode.addChild(clusterSettingResource, "ClusterSetting:1"); - - TreeNode serviceGroupsTree = clusterTree.addChild(null, "servicegroups"); - Resource serviceGroupResource = new ResourceImpl(Resource.Type.ServiceGroup); - serviceGroupResource.setProperty("ServiceGroupInfo/cluster_id", "1"); - serviceGroupResource.setProperty("ServiceGroupInfo/cluster_name", "c1"); - serviceGroupResource.setProperty("ServiceGroupInfo/service_group_id", "1"); - serviceGroupResource.setProperty("ServiceGroupInfo/service_group_name", "core"); - TreeNode serviceGroup1Tree = serviceGroupsTree.addChild(serviceGroupResource, "ServiceGroup:1"); - clusterTree.addChild(serviceGroupsTree); - - TreeNode servicesTree = serviceGroup1Tree.addChild(null, "services"); + TreeNode servicesTree = clusterTree.addChild(null, "services"); servicesTree.setProperty("isCollection", "true"); //Scenario 1 : Service with Credential Store enabled, Recovery enabled for Component:1 and not for Component:2 @@ -375,15 +381,15 @@ public void testGetSettings_instance(){ assertTrue(children.containsKey("settings")); List> settingValues = (ArrayList)children.get("settings"); - Boolean isClusterSettings = false; + Boolean isRecoverySettings = false; Boolean isComponentSettings = false; Boolean isServiceSettings = false; //Verify actual values for(Map settingProp : settingValues){ - if(settingProp.containsKey("cluster_settings")){ - isClusterSettings = true; - HashSet> checkPropSize = (HashSet)settingProp.get("cluster_settings"); + if(settingProp.containsKey("recovery_settings")){ + isRecoverySettings = true; + HashSet> checkPropSize = (HashSet)settingProp.get("recovery_settings"); assertEquals(1,checkPropSize.size()); assertEquals("true",checkPropSize.iterator().next().get("recovery_enabled")); @@ -410,7 +416,7 @@ public void testGetSettings_instance(){ } } //Verify if required information is present in actual result - assertTrue(isClusterSettings); + assertTrue(isRecoverySettings); assertTrue(isComponentSettings); assertTrue(isServiceSettings); @@ -437,10 +443,13 @@ public void testFinalizeProperties__instance_noComponentNode() { @Test public void testFinalizeResult_kerberos() throws Exception{ + + 
setupMocksForKerberosEnabledCluster(); + Result result = new ResultImpl(true); createClusterResultTree(result.getResultTree()); - ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topologyWithKerberos); + ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology); Result blueprintResult = renderer.finalizeResult(result); TreeNode blueprintTree = blueprintResult.getResultTree(); @@ -452,10 +461,10 @@ public void testFinalizeResult_kerberos() throws Exception{ Resource blueprintResource = blueprintNode.getObject(); Map> properties = blueprintResource.getPropertiesMap(); - checkMpackInstance(properties); + assertEquals(STACK_ID.getStackName(), properties.get("Blueprints").get("stack_name")); + assertEquals(STACK_ID.getStackVersion(), properties.get("Blueprints").get("stack_version")); Map securityProperties = (Map) properties.get("Blueprints").get("security"); - assertNotNull(securityProperties); assertEquals("KERBEROS", securityProperties.get("type")); assertNotNull(((Map) securityProperties.get("kerberos_descriptor")).get("properties")); } @@ -478,7 +487,8 @@ public void testFinalizeResult() throws Exception{ Resource blueprintResource = blueprintNode.getObject(); Map> properties = blueprintResource.getPropertiesMap(); - checkMpackInstance(properties); + assertEquals(STACK_ID.getStackName(), properties.get("Blueprints").get("stack_name")); + assertEquals(STACK_ID.getStackVersion(), properties.get("Blueprints").get("stack_version")); Collection> host_groups = (Collection>) properties.get("").get("host_groups"); assertEquals(2, host_groups.size()); @@ -556,7 +566,8 @@ public void testFinalizeResultWithAttributes() throws Exception{ Resource blueprintResource = blueprintNode.getObject(); Map> properties = blueprintResource.getPropertiesMap(); - checkMpackInstance(properties); + assertEquals(STACK_ID.getStackName(), properties.get("Blueprints").get("stack_name")); + assertEquals(STACK_ID.getStackVersion(), properties.get("Blueprints").get("stack_version")); Collection> host_groups = (Collection>) properties.get("").get("host_groups"); assertEquals(2, host_groups.size()); @@ -691,18 +702,9 @@ public Map> getPropertiesMap() { TreeNode clusterTree = resultTree.addChild(clusterResource, "Cluster:1"); - // add a service group and empty services resource for basic unit testing - TreeNode serviceGroupsTree = clusterTree.addChild(null, "servicegroups"); - Resource serviceGroupResource = new ResourceImpl(Resource.Type.ServiceGroup); - serviceGroupResource.setProperty("ServiceGroupInfo/cluster_id", "1"); - serviceGroupResource.setProperty("ServiceGroupInfo/cluster_name", "c1"); - serviceGroupResource.setProperty("ServiceGroupInfo/service_group_id", "1"); - serviceGroupResource.setProperty("ServiceGroupInfo/service_group_name", "core"); - TreeNode serviceGroup1Tree = serviceGroupsTree.addChild(serviceGroupResource, "ServiceGroup:1"); - clusterTree.addChild(serviceGroupsTree); - - TreeNode servicesTree = serviceGroup1Tree.addChild(null, "services"); - servicesTree.setProperty("isCollection", "true"); + // add empty services resource for basic unit testing + Resource servicesResource = new ResourceImpl(Resource.Type.Service); + clusterTree.addChild(servicesResource, "services"); Resource configurationsResource = new ResourceImpl(Resource.Type.Configuration); @@ -802,13 +804,6 @@ public Map> getPropertiesMap() { host3ComponentsTree.addChild(ttComponentResource, "HostComponent:2"); } - private void checkMpackInstance(Map> blueprintProperties) { - Map mpackInstanceProperties = - 
((List>)blueprintProperties.get("").get("mpack_instances")).get(0); - assertEquals(STACK_ID.getStackName(), mpackInstanceProperties.get("name")); - assertEquals(STACK_ID.getStackVersion(), mpackInstanceProperties.get("version")); - } - private String getLocalHostName() throws UnknownHostException { return InetAddress.getLocalHost().getHostName(); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/MinimalRendererTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/MinimalRendererTest.java index c653f9ad325..580de5003ae 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/MinimalRendererTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/MinimalRendererTest.java @@ -382,7 +382,7 @@ public void testFinalizeResult() throws Exception { Map> componentProperties = componentResource.getPropertiesMap(); assertEquals(1, componentProperties.size()); assertEquals(1, componentProperties.get("HostRoles").size()); - assertTrue(componentProperties.get("HostRoles").containsKey("id")); + assertTrue(componentProperties.get("HostRoles").containsKey("component_name")); } } } @@ -450,7 +450,7 @@ public void testFinalizeResult_propsSetOnSubResource() throws Exception { Map> componentProperties = componentResource.getPropertiesMap(); assertEquals(1, componentProperties.size()); assertEquals(1, componentProperties.get("HostRoles").size()); - assertTrue(componentProperties.get("HostRoles").containsKey("id")); + assertTrue(componentProperties.get("HostRoles").containsKey("component_name")); } } } @@ -514,25 +514,21 @@ private void createResultTree(TreeNode resultTree) throws Exception{ // host 1 components Resource nnComponentResource = new ResourceImpl(Resource.Type.HostComponent); - nnComponentResource.setProperty("HostRoles/id", 1L); nnComponentResource.setProperty("HostRoles/component_name", "NAMENODE"); nnComponentResource.setProperty("HostRoles/host_name", "testHost"); nnComponentResource.setProperty("HostRoles/cluster_name", "testCluster"); Resource dnComponentResource = new ResourceImpl(Resource.Type.HostComponent); - dnComponentResource.setProperty("HostRoles/id", 2L); dnComponentResource.setProperty("HostRoles/component_name", "DATANODE"); dnComponentResource.setProperty("HostRoles/host_name", "testHost"); dnComponentResource.setProperty("HostRoles/cluster_name", "testCluster"); Resource jtComponentResource = new ResourceImpl(Resource.Type.HostComponent); - jtComponentResource.setProperty("HostRoles/id", 3L); jtComponentResource.setProperty("HostRoles/component_name", "JOBTRACKER"); jtComponentResource.setProperty("HostRoles/host_name", "testHost"); jtComponentResource.setProperty("HostRoles/cluster_name", "testCluster"); Resource ttComponentResource = new ResourceImpl(Resource.Type.HostComponent); - ttComponentResource.setProperty("HostRoles/id", 4L); ttComponentResource.setProperty("HostRoles/component_name", "TASKTRACKER"); jtComponentResource.setProperty("HostRoles/host_name", "testHost"); jtComponentResource.setProperty("HostRoles/cluster_name", "testCluster"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinitionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinitionTest.java index da15142c869..a93a61bad80 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinitionTest.java +++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/RepositoryVersionResourceDefinitionTest.java @@ -46,7 +46,7 @@ public void testGetSubResourceDefinitions() throws Exception { final RepositoryVersionResourceDefinition resourceDefinition = new RepositoryVersionResourceDefinition(); final Set subResourceDefinitions = resourceDefinition.getSubResourceDefinitions (); final Iterator iterator = subResourceDefinitions.iterator(); - Assert.assertEquals(Resource.Type.OperatingSystemReadOnly, iterator.next().getType()); + Assert.assertEquals(Resource.Type.OperatingSystem, iterator.next().getType()); Assert.assertEquals(1, subResourceDefinitions.size()); } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinitionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinitionTest.java index 89cd8d743a3..0f9103769e9 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinitionTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/StackVersionResourceDefinitionTest.java @@ -54,7 +54,7 @@ public void testGetSubResourceDefinitions() { for (SubResourceDefinition subResource : subResources) { Resource.Type type = subResource.getType(); - if (type.equals(Resource.Type.OperatingSystemReadOnly)) { + if (type.equals(Resource.Type.OperatingSystem)) { operatingSystemFound = true; } else if (type.equals(Resource.Type.StackService)) { serviceFound = true; diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java index f9cd4dc9040..33eb12ceef8 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java @@ -48,8 +48,8 @@ public List getTestInvocations() throws Exception { //getHosts service = new TestHostService("clusterName", null); - m = service.getClass().getMethod("getHosts", String.class, HttpHeaders.class, UriInfo.class, String.class); - args = new Object[] {null, getHttpHeaders(), getUriInfo(), null}; + m = service.getClass().getMethod("getHosts", String.class, HttpHeaders.class, UriInfo.class); + args = new Object[] {null, getHttpHeaders(), getUriInfo()}; listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null)); //createHost diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java index 2abc1522799..10fde6fa39f 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessorTest.java @@ -87,16 +87,16 @@ public void testAdviseConfiguration() throws StackAdvisorException, Configuratio expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ALWAYS_APPLY).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - 
expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(stack.getConfiguration(services)).andReturn(createStackDefaults()).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); - expect(clusterTopology.isValidConfigType("core-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse()); expect(configuration.getFullProperties()).andReturn(props).anyTimes(); @@ -126,16 +126,16 @@ public void testAdviseConfigurationWithOnlyStackDefaultsApply() throws StackAdvi expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(stack.getConfiguration(services)).andReturn(createStackDefaults()).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); - expect(clusterTopology.isValidConfigType("core-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse()); expect(configuration.getFullProperties()).andReturn(props).anyTimes(); @@ -166,16 +166,16 @@ public void testAdviseConfigurationWithOnlyStackDefaultsApplyWhenNoUserInputForD expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + 
expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(stack.getConfiguration(services)).andReturn(createStackDefaults()).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); - expect(clusterTopology.isValidConfigType("core-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse()); expect(configuration.getFullProperties()).andReturn(props).anyTimes(); @@ -204,16 +204,16 @@ public void testAdviseConfigurationWith_ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); expect(clusterTopology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(stack.getConfiguration(services)).andReturn(createStackDefaults()).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); - expect(clusterTopology.isValidConfigType("core-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("core-site")).andReturn(true).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(createRecommendationResponse()); expect(configuration.getFullProperties()).andReturn(props).anyTimes(); @@ -240,15 +240,15 @@ public void testAdviseConfigurationWhenConfigurationRecommendFails() throws Stac expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes(); expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + 
expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andThrow(new StackAdvisorException("ex")); expect(configuration.getFullProperties()).andReturn(props); @@ -273,14 +273,14 @@ public void testAdviseConfigurationWhenConfigurationRecommendHasInvalidResponse( expect(clusterTopology.getAdvisedConfigurations()).andReturn(advisedConfigurations).anyTimes(); expect(clusterTopology.getConfiguration()).andReturn(configuration).anyTimes(); expect(clusterTopology.isClusterKerberosEnabled()).andReturn(false).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); - expect(clusterTopology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getStackId()).andReturn(STACK_ID).anyTimes(); expect(stack.getVersion()).andReturn(STACK_ID.getStackVersion()).anyTimes(); expect(stack.getName()).andReturn(STACK_ID.getStackName()).anyTimes(); expect(stack.getConfiguration(services)).andReturn(createStackDefaults()).anyTimes(); expect(stack.getServices(STACK_ID)).andReturn(services).anyTimes(); - expect(clusterTopology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(blueprint.getHostGroups()).andReturn(createHostGroupMap()).anyTimes(); expect(hostGroup.getComponentNames()).andReturn(Arrays.asList("comp1", "comp2")).anyTimes(); expect(stackAdvisorHelper.recommend(anyObject(StackAdvisorRequest.class))).andReturn(new RecommendationResponse()); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/audit/request/creator/RepositoryVersionEventCreatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/audit/request/creator/RepositoryVersionEventCreatorTest.java index 54434302ac0..3d77c02e621 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/audit/request/creator/RepositoryVersionEventCreatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/audit/request/creator/RepositoryVersionEventCreatorTest.java @@ -31,7 +31,7 @@ import org.apache.ambari.server.audit.event.request.ChangeRepositoryVersionRequestAuditEvent; import org.apache.ambari.server.audit.event.request.DeleteRepositoryVersionRequestAuditEvent; import org.apache.ambari.server.audit.request.eventcreator.RepositoryVersionEventCreator; -import org.apache.ambari.server.controller.internal.OperatingSystemReadOnlyResourceProvider; +import org.apache.ambari.server.controller.internal.OperatingSystemResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryResourceProvider; import org.apache.ambari.server.controller.internal.RepositoryVersionResourceProvider; import org.apache.ambari.server.controller.spi.Resource; @@ -128,7 +128,7 @@ private Set> 
createOperatingSystems() { // *** Map operatingSystem = new HashMap<>(); - operatingSystem.put(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, "redhat7"); + operatingSystem.put(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, "redhat7"); Set> repositories = new HashSet<>(); @@ -143,7 +143,7 @@ private Set> createOperatingSystems() { operatingSystem.put("repositories", repositories); // *** Map operatingSystem2 = new HashMap<>(); - operatingSystem2.put(OperatingSystemReadOnlyResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, "redhat6"); + operatingSystem2.put(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID, "redhat6"); Set> repositories2 = new HashSet<>(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java index fcd8d812154..8fede943643 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java @@ -19,7 +19,6 @@ package org.apache.ambari.server.bootstrap; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -69,8 +68,7 @@ public class MockModule extends AbstractModule { protected void configure() { BootStrapImpl bsImpl = mock(BootStrapImpl.class); when(bsImpl.getStatus(0)).thenReturn(generateDummyBSStatus()); - when(bsImpl.runBootStrap(any(SshHostInfo.class), eq(false))).thenReturn(generateBSResponse()); - when(bsImpl.runBootStrap(any(SshHostInfo.class), eq(true))).thenReturn(generateBSResponse()); + when(bsImpl.runBootStrap(any(SshHostInfo.class))).thenReturn(generateBSResponse()); bind(BootStrapImpl.class).toInstance(bsImpl); requestStaticInjection(BootStrapResource.class); } @@ -135,13 +133,4 @@ public void bootStrapPost() throws UniformInterfaceException, JSONException { Assert.assertEquals("OK", object.get("status")); } - - @Test - public void bootStrapValidationPost() throws UniformInterfaceException, JSONException { - WebResource webResource = resource(); - JSONObject object = webResource.path("/bootstrap/validations").type( - MediaType.APPLICATION_JSON).post(JSONObject.class, createDummySshInfo()); - - Assert.assertEquals("OK", object.get("status")); - } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java b/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java index 6337c9efef4..1c4da29fcc3 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java @@ -94,7 +94,7 @@ public void testRun() throws Exception { info.setHosts(hosts); info.setUser("user"); info.setPassword("passwd"); - BSResponse response = impl.runBootStrap(info, false); + BSResponse response = impl.runBootStrap(info); LOG.info("Response id from bootstrap " + response.getRequestId()); /* do a query */ BootStrapStatus status = impl.getStatus(response.getRequestId()); @@ -172,7 +172,7 @@ public void testHostFailure() throws Exception { info.setUser("user"); info.setUserRunAs("root"); info.setPassword("passwd"); - BSResponse response = impl.runBootStrap(info, false); + BSResponse response = impl.runBootStrap(info); long requestId = response.getRequestId(); 
LOG.info("Response id from bootstrap " + requestId); /* create failed done file for host2 */ diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java new file mode 100644 index 00000000000..dca14ab6703 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/AtlasPresenceCheckTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import static org.junit.Assert.assertEquals; + +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Test; +import org.mockito.Mockito; + + +public class AtlasPresenceCheckTest { + private final AtlasPresenceCheck m_check = new AtlasPresenceCheck(); + + @Test + public void perform() throws Exception { + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(Mockito.mock(RepositoryVersionEntity.class)); + m_check.perform(check, request); + + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + } +} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java new file mode 100644 index 00000000000..efc6a80e894 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DruidHighAvailabilityCheckTest.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.ServiceComponentHost; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Provider; + +/** + * Unit tests for DruidHighAvailabilityCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class DruidHighAvailabilityCheckTest +{ + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final DruidHighAvailabilityCheck druidHighAvailabilityCheck = new DruidHighAvailabilityCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map<String, Service> m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + druidHighAvailabilityCheck.clustersProvider = new Provider<Clusters>() { + @Override + public Clusters get() { + return clusters; + } + }; + + druidHighAvailabilityCheck.ambariMetaInfo = new Provider<AmbariMetaInfo>() { + @Override + public AmbariMetaInfo get() { + return Mockito.mock(AmbariMetaInfo.class); + } + }; + + Configuration config = Mockito.mock(Configuration.class); + druidHighAvailabilityCheck.config = config; + + m_services.clear(); + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("DRUID", service); + + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(druidHighAvailabilityCheck.isApplicable(request)); + + request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + request.addResult(CheckDescription.DRUID_HA_WARNING, PrereqCheckStatus.PASS); + Assert.assertTrue(druidHighAvailabilityCheck.isApplicable(request)); + + 
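+ // Once DRUID is removed from the cluster's service map, the check should report itself as not applicable.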
m_services.remove("DRUID"); + Assert.assertFalse(druidHighAvailabilityCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final ServiceComponentHost serviceComponentHost= Mockito.mock(ServiceComponentHost.class); + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final Service service = Mockito.mock(Service.class); + final ServiceComponent serviceComponent = Mockito.mock(ServiceComponent.class); + final ServiceComponent haComponent = Mockito.mock(ServiceComponent.class); + Mockito.when(serviceComponent.getServiceComponentHosts()).thenReturn(Collections.singletonMap("host", null)); + Mockito.when(haComponent.getServiceComponentHosts()).thenReturn(ImmutableMap.of("host1", serviceComponentHost, "host2", serviceComponentHost)); + + // All Components Not HA + Mockito.when(cluster.getService("DRUID")).thenReturn(service); + Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(serviceComponent); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + Assert.assertEquals("DRUID", check.getFailedOn().toArray(new String[1])[0]); + Assert.assertEquals("High Availability is not enabled for Druid. Druid Service may have some downtime during upgrade. Deploy multiple instances of DRUID_BROKER, DRUID_COORDINATOR, DRUID_HISTORICAL, DRUID_OVERLORD, DRUID_MIDDLEMANAGER, DRUID_ROUTER in the Cluster to avoid any downtime.", check.getFailReason()); + + // Some Components have HA + Mockito.when(cluster.getService("DRUID")).thenReturn(service); + Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(serviceComponent); + Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(haComponent); + check = new PrerequisiteCheck(null, null); + druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + Assert.assertEquals("DRUID", check.getFailedOn().toArray(new String[1])[0]); + Assert.assertEquals("High Availability is not enabled for Druid. Druid Service may have some downtime during upgrade. 
Deploy multiple instances of DRUID_COORDINATOR, DRUID_OVERLORD, DRUID_MIDDLEMANAGER in the Cluster to avoid any downtime.", check.getFailReason()); + + // All components have HA + Mockito.when(cluster.getService("DRUID")).thenReturn(service); + Mockito.when(service.getServiceComponent("DRUID_COORDINATOR")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_BROKER")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_MIDDLEMANAGER")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_HISTORICAL")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_OVERLORD")).thenReturn(haComponent); + Mockito.when(service.getServiceComponent("DRUID_ROUTER")).thenReturn(haComponent); + + + check = new PrerequisiteCheck(null, null); + druidHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java new file mode 100644 index 00000000000..606d4b73264 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveDynamicServiceDiscoveryCheckTest.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.google.inject.Provider; + +/** + * Tests for {@link HiveDynamicServiceDiscoveryCheck} + */ +public class HiveDynamicServiceDiscoveryCheckTest { + private final Clusters m_clusters = Mockito.mock(Clusters.class); + + private final HiveDynamicServiceDiscoveryCheck m_check = new HiveDynamicServiceDiscoveryCheck(); + + final RepositoryVersionEntity repositoryVersion = Mockito.mock(RepositoryVersionEntity.class); + + /** + * + */ + @Before + public void setup() throws Exception { + m_check.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return m_clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + m_check.config = config; + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("hive-site", desiredConfig); + + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + Map checkProperties = new HashMap<>(); + checkProperties.put("min-failure-stack-version","HDP-2.3.0.0"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + m_check.getClass().getName())).thenReturn(checkProperties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + + // Check HDP-2.2.x => HDP-2.2.y + request.setSourceStackId(new StackId("HDP-2.2.4.2")); + + Mockito.when(repositoryVersion.getVersion()).thenReturn("2.2.8.4"); + Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.8.4")); + request.setTargetRepositoryVersion(repositoryVersion); + + m_check.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + // Check HDP-2.2.x => HDP-2.3.y + request.setSourceStackId(new StackId("HDP-2.2.4.2")); + + Mockito.when(repositoryVersion.getVersion()).thenReturn("2.3.8.4"); + Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3.8.4")); + 
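+ // Crossing a major stack boundary (HDP-2.2 -> HDP-2.3) without dynamic service discovery configured must FAIL rather than merely WARN.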
request.setTargetRepositoryVersion(repositoryVersion); + + m_check.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Check HDP-2.3.x => HDP-2.3.y + request.setSourceStackId(new StackId("HDP-2.3.4.2")); + request.setTargetRepositoryVersion(repositoryVersion); + m_check.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Check HDP-2.3.x => HDP-2.4.y + request.setSourceStackId(new StackId("HDP-2.3.4.2")); + + Mockito.when(repositoryVersion.getVersion()).thenReturn("2.4.8.4"); + Mockito.when(repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.4.8.4")); + request.setTargetRepositoryVersion(repositoryVersion); + + m_check.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Check when properties are specified + properties.put("hive.server2.support.dynamic.service.discovery", "true"); + properties.put("hive.zookeeper.quorum", "host"); + properties.put("hive.server2.zookeeper.namespace", "namespace"); + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } + +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java new file mode 100644 index 00000000000..6bba2ea8bbb --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveMultipleMetastoreCheckTest.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.ServiceComponentHost; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Tests {@link HiveMultipleMetastoreCheck} + */ +@RunWith(MockitoJUnitRunner.class) +public class HiveMultipleMetastoreCheckTest { + private final Clusters m_clusters = Mockito.mock(Clusters.class); + private final HiveMultipleMetastoreCheck m_check = new HiveMultipleMetastoreCheck(); + private final RepositoryVersionDAO repositoryVersionDAO = Mockito.mock( + RepositoryVersionDAO.class); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + /** + * + */ + @Before + public void setup() throws Exception { + m_check.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return m_clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + m_check.config = config; + + Mockito.when(m_repositoryVersion.getVersion()).thenReturn("1.0.0.0-1234"); + Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "1.0")); + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + /** + * Tests that the check is applicable when hive is installed. 
+ * + * @throws Exception + */ + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + Mockito.when(cluster.getServices()).thenReturn(m_services); + + m_services.put("HDFS", Mockito.mock(Service.class)); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + // HIVE not installed + Assert.assertFalse(m_check.isApplicable(request)); + + // install HIVE + m_services.put("HIVE", Mockito.mock(Service.class)); + + m_check.repositoryVersionDaoProvider = new Provider() { + @Override + public RepositoryVersionDAO get() { + return repositoryVersionDAO; + } + }; + + Mockito.when(repositoryVersionDAO.findByStackNameAndVersion(Mockito.anyString(), + Mockito.anyString())).thenReturn(m_repositoryVersion); + + // HIVE installed + Assert.assertTrue(m_check.isApplicable(request)); + } + + /** + * Tests that the warning is correctly tripped when there are not enough + * metastores. + * + * @throws Exception + */ + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Service hive = Mockito.mock(Service.class); + ServiceComponent metastore = Mockito.mock(ServiceComponent.class); + + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + Mockito.when(cluster.getService("HIVE")).thenReturn(hive); + + Mockito.when(hive.getServiceComponent("HIVE_METASTORE")).thenReturn(metastore); + + Map metastores = new HashMap<>(); + Mockito.when(metastore.getServiceComponentHosts()).thenReturn(metastores); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + m_check.perform(check, request); + + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + metastores.put("c6401", Mockito.mock(ServiceComponentHost.class)); + metastores.put("c6402", Mockito.mock(ServiceComponentHost.class)); + + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } + + @Test + public void testPerformFail() throws Exception{ + final Cluster cluster = Mockito.mock(Cluster.class); + final LinkedHashSet failedOnExpected = new LinkedHashSet<>(); + Service hive = Mockito.mock(Service.class); + ServiceComponent metastore = Mockito.mock(ServiceComponent.class); + Map metastores = new HashMap<>(); + + failedOnExpected.add("HIVE"); + + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + Mockito.when(cluster.getService("HIVE")).thenReturn(hive); + Mockito.when(hive.getServiceComponent("HIVE_METASTORE")).thenReturn(metastore); + Mockito.when(metastore.getServiceComponentHosts()).thenReturn(metastores); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + m_check.perform(check, request); + + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + Assert.assertEquals(failedOnExpected, check.getFailedOn()); + 
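+ // With HIVE reported in the failed-on set, the overall status should still be a WARNING rather than a hard failure.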
Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveNotRollingWarningTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveNotRollingWarningTest.java new file mode 100644 index 00000000000..bed1b4354c0 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HiveNotRollingWarningTest.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; +import org.easymock.EasyMock; +import org.easymock.EasyMockSupport; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Tests {@link HiveNotRollingWarning}. 
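+ * Hive does not support rolling restart of its daemons, so a rolling upgrade that includes Hive should always surface a warning.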
+ */ +@RunWith(MockitoJUnitRunner.class) +public class HiveNotRollingWarningTest extends EasyMockSupport { + + private final String m_clusterName = "c1"; + private final Clusters m_clusters = niceMock(Clusters.class); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + + /** + * @throws Exception + */ + @Test + public void testIsApplicable() throws Exception { + final HiveNotRollingWarning hiveWarningCheck = new HiveNotRollingWarning(); + hiveWarningCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return m_clusters; + } + }; + + final Cluster cluster = niceMock(Cluster.class); + final Service hive = niceMock(Service.class); + + m_services.put("HIVE", hive); + + EasyMock.expect(cluster.getClusterId()).andReturn(1L).anyTimes(); + + EasyMock.expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.3")).anyTimes(); + EasyMock.expect(cluster.getServices()).andReturn(m_services).anyTimes(); + EasyMock.expect(m_clusters.getCluster(m_clusterName)).andReturn(cluster).atLeastOnce(); + + PrereqCheckRequest request = niceMock(PrereqCheckRequest.class); + EasyMock.expect(request.getClusterName()).andReturn(m_clusterName).anyTimes(); + EasyMock.expect(request.getUpgradeType()).andReturn(UpgradeType.ROLLING).anyTimes(); + EasyMock.expect(request.getTargetRepositoryVersion()).andReturn(m_repositoryVersion).atLeastOnce(); + + replayAll(); + + Assert.assertTrue(hiveWarningCheck.isApplicable(request)); + + verifyAll(); + } + + /** + * @throws Exception + */ + @Test + public void testPerform() throws Exception { + final HiveNotRollingWarning hiveWarningCheck = new HiveNotRollingWarning(); + + PrereqCheckRequest request = new PrereqCheckRequest(m_clusterName); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + + hiveWarningCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/KafkaKerberosCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/KafkaKerberosCheckTest.java new file mode 100644 index 00000000000..b4ee6ec48c6 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/KafkaKerberosCheckTest.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.SecurityType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +@RunWith(MockitoJUnitRunner.class) +public class KafkaKerberosCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final KafkaKerberosCheck kafkaKerberosCheck = new KafkaKerberosCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + kafkaKerberosCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + + m_services.clear(); + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("KAFKA", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(kafkaKerberosCheck.isApplicable(request)); + + m_services.remove("KAFKA"); + Assert.assertFalse(kafkaKerberosCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Map services = new HashMap<>(); + final Service service = Mockito.mock(Service.class); + + services.put("KAFKA", service); + + Mockito.when(cluster.getServices()).thenReturn(services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + 
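+ // Kafka on a Kerberized cluster should trip the upgrade warning; the unsecured case below should pass.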
Mockito.when(cluster.getSecurityType()).thenReturn(SecurityType.KERBEROS); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + + kafkaKerberosCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + check = new PrerequisiteCheck(null, null); + Mockito.when(cluster.getSecurityType()).thenReturn(SecurityType.NONE); + + kafkaKerberosCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java new file mode 100644 index 00000000000..f8ea1699486 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/MapReduce2JobHistoryStatePreservingCheckTest.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Tests for {@link org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck} + */ +@RunWith(MockitoJUnitRunner.class) +public class MapReduce2JobHistoryStatePreservingCheckTest { + private final Clusters m_clusters = Mockito.mock(Clusters.class); + private final RepositoryVersionDAO m_repositoryVersionDao = Mockito.mock(RepositoryVersionDAO.class); + + private final MapReduce2JobHistoryStatePreservingCheck m_check = new MapReduce2JobHistoryStatePreservingCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map<String, Service> m_services = new HashMap<>(); + + /** + * + */ + @Before + public void setup() throws Exception { + m_check.clustersProvider = new Provider<Clusters>() { + + @Override + public Clusters get() { + return m_clusters; + } + }; + + m_check.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() { + @Override + public RepositoryVersionDAO get() { + return m_repositoryVersionDao; + } + }; + + Configuration config = Mockito.mock(Configuration.class); + m_check.config = config; + + RepositoryVersionEntity rve = Mockito.mock(RepositoryVersionEntity.class); + Mockito.when(rve.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersionDao.findByStackNameAndVersion(Mockito.anyString(), Mockito.anyString())).thenReturn(rve); + + Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.1.1-1234"); + Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3")); + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + /** + * @throws Exception + */ + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + 
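+ // With no services registered, the check must not apply; installing MAPREDUCE2 below flips it to applicable.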
Mockito.when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.3")); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setSourceStackId(new StackId("HDP", "2.3.0.0")); + request.setTargetRepositoryVersion(m_repositoryVersion); + + // MAPREDUCE2 not installed + Assert.assertFalse(m_check.isApplicable(request)); + + // MAPREDUCE2 installed + m_services.put("MAPREDUCE2", Mockito.mock(Service.class)); + Assert.assertTrue(m_check.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("mapred-site", desiredConfig); + configMap.put("yarn-site", desiredConfig); + + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_ENABLE_KEY, "true"); + properties.put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_KEY, "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService"); + properties.put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY, ""); + check = new PrerequisiteCheck(null, null); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + check = new PrerequisiteCheck(null, null); + properties.put(MapReduce2JobHistoryStatePreservingCheck.MAPREDUCE2_JOBHISTORY_RECOVERY_STORE_LEVELDB_PATH_KEY, "/hadoop/yarn/timeline"); + properties.put(MapReduce2JobHistoryStatePreservingCheck.YARN_TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH_KEY, "not /hadoop/yarn/timeline"); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + check = new PrerequisiteCheck(null, null); + properties.put(MapReduce2JobHistoryStatePreservingCheck.YARN_TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH_KEY, "/hadoop/yarn/timeline"); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } + + @SuppressWarnings("serial") + @Test + public void testIsApplicableMinimumStackVersion() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(cluster.getServices()).thenReturn(new HashMap() { + { + put("MAPREDUCE2", null); + } + }); + Mockito.when(cluster.getCurrentStackVersion()).thenReturn(new StackId("MYSTACK-12.2")); + Mockito.when(m_clusters.getCluster("c1")).thenReturn(cluster); + PrereqCheckRequest request = new PrereqCheckRequest("c1"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + 
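+ // Even on a custom stack (MYSTACK-12.2) with a low repository version, the check is expected to remain applicable.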
Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.0.0.1"); + + // MAPREDUCE2 installed + m_services.put("MAPREDUCE2", Mockito.mock(Service.class)); + + boolean isApplicable = m_check.isApplicable(request); + Assert.assertTrue(isApplicable); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerAuditDbCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerAuditDbCheckTest.java new file mode 100644 index 00000000000..4a7e8d30213 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerAuditDbCheckTest.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/* Tests for RangerAuditDbCheck */ +@RunWith(MockitoJUnitRunner.class) +public class RangerAuditDbCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + private final RangerAuditDbCheck rangerAuditDbCheck = new RangerAuditDbCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + rangerAuditDbCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + rangerAuditDbCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + 
Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("RANGER", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(rangerAuditDbCheck.isApplicable(request)); + + m_services.remove("RANGER"); + Assert.assertFalse(rangerAuditDbCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Map services = new HashMap<>(); + final Service service = Mockito.mock(Service.class); + + services.put("RANGER", service); + + Mockito.when(cluster.getServices()).thenReturn(services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("ranger-admin-site", desiredConfig); + + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + properties.put("ranger.audit.source.type", "db"); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + rangerAuditDbCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + properties.put("ranger.audit.source.type", "solr"); + check = new PrerequisiteCheck(null, null); + rangerAuditDbCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + } + +} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerPasswordCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerPasswordCheckTest.java new file mode 100644 index 00000000000..d24c73cc1da --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerPasswordCheckTest.java @@ -0,0 +1,542 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import static org.easymock.EasyMock.anyObject; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.controller.internal.URLStreamProvider; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.easymock.EasyMock; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.google.inject.Provider; + + +/** + * Unit tests for RangerPasswordCheck + * + */ +@RunWith(PowerMockRunner.class) +@PrepareForTest(RangerPasswordCheck.class) +public class RangerPasswordCheckTest { + + private static final String RANGER_URL = "http://foo:6080/"; + + private static final String GOOD_LOGIN_RESPONSE = "{\"count\": 0 }"; + + private static final String BAD_LOGIN_RESPONSE = "Ranger redirects to login HTML"; + + private static final String GOOD_USER_RESPONSE = + "{\"queryTimeMS\": 1446758948823," + + "\"vXUsers\": [" + + " {" + + " \"name\": \"r_admin\"" + + " }" + + "]}"; + + private static final String NO_USER_RESPONSE = + "{\"queryTimeMS\": 1446758948823," + + "\"vXUsers\": [" + + "]}"; + + private Clusters m_clusters = EasyMock.createMock(Clusters.class); + private Map m_configMap = new HashMap<>(); + private RangerPasswordCheck m_rpc = null; + private URLStreamProvider m_streamProvider = EasyMock.createMock(URLStreamProvider.class); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + MockitoAnnotations.initMocks(this); + + m_configMap.put("policymgr_external_url", RANGER_URL); + m_configMap.put("admin_username", "admin"); + m_configMap.put("admin_password", "pass"); + m_configMap.put("ranger_admin_username", "r_admin"); + m_configMap.put("ranger_admin_password", "r_pass"); + + Cluster cluster = EasyMock.createMock(Cluster.class); + + Config config = EasyMock.createMock(Config.class); + final Map services = new HashMap<>(); 
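RangerPasswordCheckTest runs under PowerMockRunner with @PrepareForTest(RangerPasswordCheck.class) because PowerMockito.whenNew() can only intercept constructor calls made from instrumented code; it is the caller of new URLStreamProvider(...) that must be prepared, not URLStreamProvider itself. The interception set up in setup() below, in outline:

    PowerMockito.whenNew(URLStreamProvider.class).withAnyArguments().thenReturn(m_streamProvider);
    // every `new URLStreamProvider(...)` inside RangerPasswordCheck now yields the EasyMock stub
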
+ final Service service = EasyMock.createMock(Service.class); + + services.put("RANGER", service); + + Map<String, DesiredConfig> desiredMap = new HashMap<>(); + DesiredConfig dc = EasyMock.createMock(DesiredConfig.class); + desiredMap.put("admin-properties", dc); + desiredMap.put("ranger-env", dc); + + expect(dc.getTag()).andReturn("").anyTimes(); + expect(config.getProperties()).andReturn(m_configMap).anyTimes(); + expect(cluster.getServices()).andReturn(services).anyTimes(); + expect(cluster.getService("RANGER")).andReturn(service).anyTimes(); + expect(cluster.getDesiredConfigs()).andReturn(desiredMap).anyTimes(); + expect(cluster.getDesiredConfigByType((String) anyObject())).andReturn(config).anyTimes(); + expect(cluster.getConfig((String) anyObject(), (String) anyObject())).andReturn(config).anyTimes(); + expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes(); + + replay(m_clusters, cluster, dc, config); + + m_rpc = new RangerPasswordCheck(); + m_rpc.clustersProvider = new Provider<Clusters>() { + @Override + public Clusters get() { + return m_clusters; + } + }; + + EasyMock.reset(m_streamProvider); + PowerMockito.whenNew(URLStreamProvider.class).withAnyArguments().thenReturn(m_streamProvider); + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testApplicable() throws Exception { + final Service service = EasyMock.createMock(Service.class); + m_services.put("RANGER", service); + + expect(service.getDesiredStackId()).andReturn(new StackId("HDP-2.3")).anyTimes(); + + Cluster cluster = m_clusters.getCluster("cluster"); + EasyMock.reset(cluster); + expect(cluster.getServices()).andReturn(m_services).anyTimes(); + expect(cluster.getService("RANGER")).andReturn(service).atLeastOnce(); + + replay(cluster, service); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + assertTrue(m_rpc.isApplicable(request)); + } + + @SuppressWarnings("unchecked") + @Test + public void testMissingProps() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + m_configMap.clear(); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not check credentials. Missing property admin-properties/policymgr_external_url", check.getFailReason()); + + m_configMap.put("policymgr_external_url", RANGER_URL); + check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not check credentials. Missing property ranger-env/admin_username", check.getFailReason()); + + m_configMap.put("admin_username", "admin"); + check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not check credentials. 
Missing property ranger-env/admin_password", check.getFailReason()); + + + m_configMap.put("admin_password", "pass"); + check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not check credentials. Missing property ranger-env/ranger_admin_username", check.getFailReason()); + + m_configMap.put("ranger_admin_username", "r_admin"); + check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not check credentials. Missing property ranger-env/ranger_admin_password", check.getFailReason()); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())); + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + m_configMap.put("ranger_admin_password", "r_pass"); + check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + } + + @SuppressWarnings("unchecked") + @Test + public void testNormal() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testNoUser() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(NO_USER_RESPONSE.getBytes())).once(); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + verify(conn, 
m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testBadUserParsing() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream( + "some really bad non-json".getBytes())); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + String error = "The response from Ranger was malformed. "; + error += "com.google.gson.stream.MalformedJsonException: Expected EOF at line 1 column 6. "; + error += "Request: " + RANGER_URL + "service/xusers/users?name=r_admin"; + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals(error, check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testJsonCasting() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream( + "{ \"data\": \"bad\", \"vXUsers\": \"xyz\" }".getBytes())); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + String error = "The response from Ranger was malformed. "; + error += "java.lang.String cannot be cast to java.util.List. 
"; + error += "Request: " + RANGER_URL + "service/xusers/users?name=r_admin"; + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals(error, check.getFailReason()); + + verify(conn, m_streamProvider); + } + + + @SuppressWarnings("unchecked") + @Test + public void testAdminUnauthorized() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(401); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.SERVICES_RANGER_PASSWORD_VERIFY, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + assertEquals("Credentials for user 'admin' in Ambari do not match Ranger.", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testAdminUnauthorizedByRedirect() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(BAD_LOGIN_RESPONSE.getBytes())); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.SERVICES_RANGER_PASSWORD_VERIFY, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + assertEquals("Credentials for user 'admin' in Ambari do not match Ranger.", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testAdminIOException() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andThrow(new IOException("whoops")); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.SERVICES_RANGER_PASSWORD_VERIFY, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not access Ranger to verify user 'admin' against " + RANGER_URL + "service/public/api/repository/count. whoops", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testAdminBadResponse() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(404); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(CheckDescription.SERVICES_RANGER_PASSWORD_VERIFY, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not verify credentials for user 'admin'. 
Response code 404 received from " + RANGER_URL + "service/public/api/repository/count", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testUserUnauthorized() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(401); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + assertEquals("Credentials for user 'r_admin' in Ambari do not match Ranger.", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testUserUnauthorizedByRedirect() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(BAD_LOGIN_RESPONSE.getBytes())).once(); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + assertEquals("Credentials for user 'r_admin' in Ambari do not match Ranger.", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testUserIOException() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andThrow(new IOException("again!")); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn(conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not access Ranger to verify user 'r_admin' against " + RANGER_URL + "service/public/api/repository/count. 
again!", check.getFailReason()); + + verify(conn, m_streamProvider); + } + + @SuppressWarnings("unchecked") + @Test + public void testUserBadResponse() throws Exception { + + HttpURLConnection conn = EasyMock.createMock(HttpURLConnection.class); + + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_LOGIN_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(200); + expect(conn.getInputStream()).andReturn(new ByteArrayInputStream(GOOD_USER_RESPONSE.getBytes())).once(); + expect(conn.getResponseCode()).andReturn(500); + + expect(m_streamProvider.processURL((String) anyObject(), (String) anyObject(), + (InputStream) anyObject(), (Map>) anyObject())).andReturn( + conn).anyTimes(); + + replay(conn, m_streamProvider); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_rpc.perform(check, new PrereqCheckRequest("cluster")); + + assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + assertEquals("Could not verify credentials for user 'r_admin'. Response code 500 received from " + RANGER_URL + "service/public/api/repository/count", check.getFailReason()); + + verify(conn, m_streamProvider); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerSSLConfigCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerSSLConfigCheckTest.java new file mode 100644 index 00000000000..50ec21c11de --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/RangerSSLConfigCheckTest.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + + +/* Test for RangerSSLConfigCheck */ +@RunWith(MockitoJUnitRunner.class) +public class RangerSSLConfigCheckTest { + + private final Clusters clusters = Mockito.mock(Clusters.class); + private final RangerSSLConfigCheck rangerSSLConfigCheck = new RangerSSLConfigCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + rangerSSLConfigCheck.clustersProvider = new Provider() { + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + rangerSSLConfigCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("RANGER", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(rangerSSLConfigCheck.isApplicable(request)); + + m_services.remove("RANGER"); + Assert.assertFalse(rangerSSLConfigCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Map services = new HashMap<>(); + final Service service = Mockito.mock(Service.class); + + services.put("RANGER", service); + + Mockito.when(cluster.getServices()).thenReturn(services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + 
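These testPerform() methods all stub the same two-hop lookup the upgrade checks use to read configuration: cluster.getDesiredConfigs() yields a DesiredConfig whose tag is then passed to cluster.getConfig(type, tag). A minimal sketch of that resolution, assuming it mirrors how the production check fetches a property:

    DesiredConfig dc = cluster.getDesiredConfigs().get("ranger-admin-site");
    Config cfg = cluster.getConfig("ranger-admin-site", dc.getTag());
    String httpEnabled = cfg.getProperties().get("ranger.service.http.enabled");
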
Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map<String, DesiredConfig> configMap = new HashMap<>(); + configMap.put("ranger-admin-site", desiredConfig); + + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map<String, String> properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + properties.put("ranger.service.http.enabled","true"); + properties.put("ranger.service.https.attrib.ssl.enabled","true"); + properties.put("ranger.https.attrib.keystore.file","/etc/ranger/security/ranger-admin-keystore.jks"); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + rangerSSLConfigCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("ranger.service.http.enabled","true"); + properties.put("ranger.service.https.attrib.ssl.enabled","false"); + properties.put("ranger.https.attrib.keystore.file","/etc/ranger/security/ranger-admin-keystore.jks"); + check = new PrerequisiteCheck(null, null); + rangerSSLConfigCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("ranger.service.http.enabled","false"); + properties.put("ranger.service.https.attrib.ssl.enabled","true"); + properties.put("ranger.https.attrib.keystore.file","/etc/ranger/admin/conf/ranger-admin-keystore.jks"); + check = new PrerequisiteCheck(null, null); + rangerSSLConfigCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus()); + + properties.put("ranger.service.http.enabled","false"); + properties.put("ranger.service.https.attrib.ssl.enabled","true"); + properties.put("ranger.https.attrib.keystore.file","/etc/ranger/security/ranger-admin-keystore.jks"); + check = new PrerequisiteCheck(null, null); + rangerSSLConfigCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("ranger.service.http.enabled","false"); + properties.put("ranger.service.https.attrib.ssl.enabled","false"); + properties.put("ranger.https.attrib.keystore.file","/etc/ranger/security/ranger-admin-keystore.jks"); + check = new PrerequisiteCheck(null, null); + rangerSSLConfigCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} + + diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java new file mode 100644 index 00000000000..c038c47ce4b --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.dao.HostComponentStateDAO; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceComponent; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for SecondaryNamenodeDeletedCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class SecondaryNamenodeDeletedCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + private final HostComponentStateDAO hostComponentStateDAO = Mockito.mock(HostComponentStateDAO.class); + + private final SecondaryNamenodeDeletedCheck secondaryNamenodeDeletedCheck = new SecondaryNamenodeDeletedCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + secondaryNamenodeDeletedCheck.clustersProvider = new Provider() { + @Override + public Clusters get() { + return clusters; + } + }; + + secondaryNamenodeDeletedCheck.ambariMetaInfo = new Provider() { + @Override + public AmbariMetaInfo get() { + return Mockito.mock(AmbariMetaInfo.class); + } + }; + + secondaryNamenodeDeletedCheck.hostComponentStateDao = hostComponentStateDAO; + Configuration config = Mockito.mock(Configuration.class); + secondaryNamenodeDeletedCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + 
public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("HDFS", service); + + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(secondaryNamenodeDeletedCheck.isApplicable(request)); + + request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + request.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL); + Assert.assertFalse(secondaryNamenodeDeletedCheck.isApplicable(request)); + + request.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS); + Assert.assertTrue(secondaryNamenodeDeletedCheck.isApplicable(request)); + + m_services.remove("HDFS"); + Assert.assertFalse(secondaryNamenodeDeletedCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final Service service = Mockito.mock(Service.class); + final ServiceComponent serviceComponent = Mockito.mock(ServiceComponent.class); + Mockito.when(cluster.getService("HDFS")).thenReturn(service); + Mockito.when(service.getServiceComponent("SECONDARY_NAMENODE")).thenReturn(serviceComponent); + Mockito.when(serviceComponent.getServiceComponentHosts()).thenReturn(Collections.singletonMap("host", null)); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + secondaryNamenodeDeletedCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + Assert.assertEquals("HDFS", check.getFailedOn().toArray(new String[1])[0]); + Assert.assertEquals("The SNameNode component must be deleted from host: host.", check.getFailReason()); + + Mockito.when(serviceComponent.getServiceComponentHosts()).thenReturn(Collections.emptyMap()); + check = new PrerequisiteCheck(null, null); + secondaryNamenodeDeletedCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java new file mode 100644 index 00000000000..b32a7821dca --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java @@ -0,0 +1,286 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for ServicesMapReduceDistributedCacheCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class ServicesMapReduceDistributedCacheCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final ServicesMapReduceDistributedCacheCheck servicesMapReduceDistributedCacheCheck = new ServicesMapReduceDistributedCacheCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + servicesMapReduceDistributedCacheCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + servicesMapReduceDistributedCacheCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("YARN", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(servicesMapReduceDistributedCacheCheck.isApplicable(request)); 
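Applicability is additionally gated on the result of the earlier NameNode HA pre-check, exercised next, and testPerformWithCheckConfig further below swaps in a wider set of accepted filesystem schemes through the dfs-protocols-regex check property. What that regex accepts, under plain java.util.regex semantics:

    java.util.regex.Pattern p = java.util.regex.Pattern.compile("^([^:]*dfs|wasb|ecs):.*");
    p.matcher("hdfs://some/path").matches();  // true: "hdfs" ends in "dfs"
    p.matcher("wasb://some/path").matches();  // true: explicit wasb alternative
    p.matcher("file:///tmp/path").matches();  // false: scheme is none of *dfs, wasb, ecs
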
+ + request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + request.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL); + Assert.assertFalse(servicesMapReduceDistributedCacheCheck.isApplicable(request)); + + request.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS); + Assert.assertTrue(servicesMapReduceDistributedCacheCheck.isApplicable(request)); + + m_services.remove("YARN"); + Assert.assertFalse(servicesMapReduceDistributedCacheCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("mapred-site", desiredConfig); + configMap.put("core-site", desiredConfig); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "hdfs://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "dfs://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "dfs://ha"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Fail due to no dfs + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + 
Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + } + + @Test + public void testPerformWithCheckConfig() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("mapred-site", desiredConfig); + configMap.put("core-site", desiredConfig); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + Map checkProperties = new HashMap<>(); + checkProperties.put("dfs-protocols-regex","^([^:]*dfs|wasb|ecs):.*"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + servicesMapReduceDistributedCacheCheck.getClass().getName())).thenReturn(checkProperties); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "hdfs://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "dfs://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "wasb://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "ecs://some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "hdfs://ha"); + 
properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "dfs://ha"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "wasb://ha"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "ecs://ha"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Fail due to no dfs + properties.put("fs.defaultFS", "anything"); + properties.put("mapreduce.application.framework.path", "/some/path"); + properties.put("mapreduce.application.classpath", "anything"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesMapReduceDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java new file mode 100644 index 00000000000..8c51add4cda --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for ServicesNamenodeHighAvailabilityCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class ServicesNamenodeHighAvailabilityCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final ServicesNamenodeHighAvailabilityCheck servicesNamenodeHighAvailabilityCheck = new ServicesNamenodeHighAvailabilityCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + servicesNamenodeHighAvailabilityCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + servicesNamenodeHighAvailabilityCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("HDFS", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(servicesNamenodeHighAvailabilityCheck.isApplicable(request)); + + m_services.remove("HDFS"); + Assert.assertFalse(servicesNamenodeHighAvailabilityCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final 
DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(Collections.singletonMap("hdfs-site", desiredConfig)); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + servicesNamenodeHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("dfs.internal.nameservices", "anything"); + check = new PrerequisiteCheck(null, null); + servicesNamenodeHighAvailabilityCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java new file mode 100644 index 00000000000..356d2cdf6cf --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import static org.easymock.EasyMock.anyObject; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.junit.Assert.assertEquals; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for ServicesNamenodeTruncateCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class ServicesNamenodeTruncateCheckTest { + + private Clusters m_clusters = EasyMock.createMock(Clusters.class); + private ServicesNamenodeTruncateCheck m_check = new ServicesNamenodeTruncateCheck(); + private final Map m_configMap = new HashMap<>(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + Cluster cluster = EasyMock.createMock(Cluster.class); + + Config config = EasyMock.createMock(Config.class); + final Service service = Mockito.mock(Service.class); + + m_services.clear(); + m_services.put("HDFS", service); + + expect(cluster.getServices()).andReturn(m_services).anyTimes(); + expect(config.getProperties()).andReturn(m_configMap).anyTimes(); + expect(cluster.getService("HDFS")).andReturn(service); + expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(config).anyTimes(); + expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes(); + + replay(m_clusters, cluster, config); + + Configuration configuration = EasyMock.createMock(Configuration.class); + replay(configuration); + m_check.config = configuration; + + m_check.clustersProvider = new Provider() { + @Override + public Clusters get() { + return m_clusters; + } + }; + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getVersion()).thenReturn("HDP-2.2.0.0"); + Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.0")); + + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + + @Test + public void testIsApplicable() throws Exception { + + PrereqCheckRequest checkRequest = new PrereqCheckRequest("c1"); + 
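+    // HDFS is present in the mocked service map, so an HDP 2.2 source against the mocked target repository should make the check applicable.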
checkRequest.setSourceStackId(new StackId("HDP", "2.2")); + checkRequest.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(m_check.isApplicable(checkRequest)); + } + + @Test + public void testPerform() throws Exception { + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + PrereqCheckRequest request = new PrereqCheckRequest("c1"); + m_check.perform(check, request); + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Check HDP-2.2.x => HDP-2.2.y is FAIL + m_configMap.put("dfs.allow.truncate", "true"); + request.setSourceStackId(new StackId("HDP-2.2.4.2")); + + Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.2.8.4"); + Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.2.8.4")); + request.setTargetRepositoryVersion(m_repositoryVersion); + + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + m_configMap.put("dfs.allow.truncate", "false"); + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Check HDP-2.2.x => HDP-2.3.y is FAIL + m_configMap.put("dfs.allow.truncate", "true"); + request.setSourceStackId(new StackId("HDP-2.2.4.2")); + + Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.8.4"); + Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3.8.4")); + request.setTargetRepositoryVersion(m_repositoryVersion); + + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + m_configMap.put("dfs.allow.truncate", "false"); + check = new PrerequisiteCheck(null, null); + m_check.perform(check, request); + assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java new file mode 100644 index 00000000000..0123f180f05 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java @@ -0,0 +1,325 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for ServicesTezDistributedCacheCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class ServicesTezDistributedCacheCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final ServicesTezDistributedCacheCheck servicesTezDistributedCacheCheck = new ServicesTezDistributedCacheCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + @Before + public void setup() throws Exception { + servicesTezDistributedCacheCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + servicesTezDistributedCacheCheck.config = config; + + m_services.clear(); + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = Mockito.mock(Service.class); + + m_services.put("TEZ", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + + Assert.assertTrue(servicesTezDistributedCacheCheck.isApplicable(request)); + + PrereqCheckRequest req = new PrereqCheckRequest("cluster"); + req.setTargetRepositoryVersion(m_repositoryVersion); + + req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL); + Assert.assertFalse(servicesTezDistributedCacheCheck.isApplicable(req)); + + req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS); + Assert.assertTrue(servicesTezDistributedCacheCheck.isApplicable(req)); + + + m_services.remove("TEZ"); + 
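+    // With TEZ removed from the cluster, the check should no longer apply.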
Assert.assertFalse(servicesTezDistributedCacheCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("tez-site", desiredConfig); + configMap.put("core-site", desiredConfig); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "hdfs://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "dfs://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "dfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Fail due to no DFS + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Fail due to no tar.gz + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.log"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Fail due to property set to true + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + 
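+    // Enabling tez.use.cluster.hadoop-libs bypasses the distributed cache, so the check must fail even with a valid tar.gz on a DFS.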
properties.put("tez.use.cluster.hadoop-libs", "true"); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + } + + @Test + public void testPerformWithCheckConfig() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("tez-site", desiredConfig); + configMap.put("core-site", desiredConfig); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + Map checkProperties = new HashMap<>(); + checkProperties.put("dfs-protocols-regex","^([^:]*dfs|wasb|ecs):.*"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + servicesTezDistributedCacheCheck.getClass().getName())).thenReturn(checkProperties); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "hdfs://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "dfs://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "wasb://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "ecs://some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + 
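+    // ecs:// is accepted here only because the dfs-protocols-regex check property whitelists it alongside *dfs and wasb.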
Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "dfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "wasb://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + properties.put("fs.defaultFS", "ecs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + + // Fail due to no DFS + properties.put("fs.defaultFS", "anything"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Fail due to no tar.gz + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.log"); + properties.put("tez.use.cluster.hadoop-libs", "false"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + // Fail due to property set to true + properties.put("fs.defaultFS", "hdfs://ha"); + properties.put("tez.lib.uris", "/some/path/to/archive.tar.gz"); + properties.put("tez.use.cluster.hadoop-libs", "true"); + request = new PrereqCheckRequest("cluster"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + check = new PrerequisiteCheck(null, null); + servicesTezDistributedCacheCheck.perform(check, request); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java new 
file mode 100644 index 00000000000..5164a4f6d9b --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.controller.PrereqCheckRequest; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.Clusters; +import org.apache.ambari.server.state.Config; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.repository.ClusterVersionSummary; +import org.apache.ambari.server.state.repository.VersionDefinitionXml; +import org.apache.ambari.server.state.stack.PrereqCheckStatus; +import org.apache.ambari.server.state.stack.PrerequisiteCheck; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.Provider; + +/** + * Unit tests for ServicesYarnWorkPreservingCheck + * + */ +@RunWith(MockitoJUnitRunner.class) +public class ServicesYarnWorkPreservingCheckTest { + private final Clusters clusters = Mockito.mock(Clusters.class); + + private final ServicesYarnWorkPreservingCheck servicesYarnWorkPreservingCheck = new ServicesYarnWorkPreservingCheck(); + + @Mock + private ClusterVersionSummary m_clusterVersionSummary; + + @Mock + private VersionDefinitionXml m_vdfXml; + + @Mock + private RepositoryVersionEntity m_repositoryVersion; + + final Map m_services = new HashMap<>(); + + + @Before + public void setup() throws Exception { + servicesYarnWorkPreservingCheck.clustersProvider = new Provider() { + + @Override + public Clusters get() { + return clusters; + } + }; + Configuration config = Mockito.mock(Configuration.class); + servicesYarnWorkPreservingCheck.config = config; + + Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD); + Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml); + Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary); + Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet()); + } + + @Test + public void testIsApplicable() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + final Service service = 
Mockito.mock(Service.class); + + m_services.put("YARN", service); + + Mockito.when(cluster.getServices()).thenReturn(m_services); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + Assert.assertTrue(servicesYarnWorkPreservingCheck.isApplicable(request)); + + m_services.remove("YARN"); + Assert.assertFalse(servicesYarnWorkPreservingCheck.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Mockito.when(cluster.getDesiredConfigs()).thenReturn(Collections.singletonMap("yarn-site", desiredConfig)); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + servicesYarnWorkPreservingCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("yarn.resourcemanager.work-preserving-recovery.enabled", "true"); + check = new PrerequisiteCheck(null, null); + servicesYarnWorkPreservingCheck.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/StormShutdownWarningTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/StormShutdownWarningTest.java new file mode 100644 index 00000000000..a5e24f1f339 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/StormShutdownWarningTest.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.RepositoryType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.google.inject.Provider;
+
+/**
+ * Tests {@link StormShutdownWarning}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class StormShutdownWarningTest extends EasyMockSupport {
+
+  private final String m_clusterName = "c1";
+  private final Clusters m_clusters = niceMock(Clusters.class);
+
+  @Mock
+  private ClusterVersionSummary m_clusterVersionSummary;
+
+  @Mock
+  private VersionDefinitionXml m_vdfXml;
+
+  @Mock
+  private RepositoryVersionEntity m_repositoryVersion;
+
+  final Map<String, Service> m_services = new HashMap<>();
+
+  @Before
+  public void setup() throws Exception {
+    m_services.clear();
+
+    Mockito.when(m_repositoryVersion.getType()).thenReturn(RepositoryType.STANDARD);
+    Mockito.when(m_repositoryVersion.getRepositoryXml()).thenReturn(m_vdfXml);
+    Mockito.when(m_vdfXml.getClusterSummary(Mockito.any(Cluster.class))).thenReturn(m_clusterVersionSummary);
+    Mockito.when(m_clusterVersionSummary.getAvailableServiceNames()).thenReturn(m_services.keySet());
+  }
+
+  /**
+   * @throws Exception
+   */
+  @Test
+  public void testIsApplicable() throws Exception {
+    final StormShutdownWarning shutdownWarning = new StormShutdownWarning();
+    shutdownWarning.clustersProvider = new Provider<Clusters>() {
+
+      @Override
+      public Clusters get() {
+        return m_clusters;
+      }
+    };
+
+    final Cluster cluster = niceMock(Cluster.class);
+    final Service storm = niceMock(Service.class);
+
+    m_services.put("STORM", storm);
+
+    EasyMock.expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+
+    EasyMock.expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.3")).anyTimes();
+    EasyMock.expect(cluster.getServices()).andReturn(m_services).anyTimes();
+    EasyMock.expect(m_clusters.getCluster(m_clusterName)).andReturn(cluster).atLeastOnce();
+
+    PrereqCheckRequest request = niceMock(PrereqCheckRequest.class);
+    EasyMock.expect(request.getClusterName()).andReturn(m_clusterName).anyTimes();
+    EasyMock.expect(request.getUpgradeType()).andReturn(UpgradeType.ROLLING).anyTimes();
+    EasyMock.expect(request.getTargetRepositoryVersion()).andReturn(m_repositoryVersion).anyTimes();
+
+    replayAll();
+
+    Assert.assertTrue(shutdownWarning.isApplicable(request));
+
+    verifyAll();
+  }
+
+  /**
+   * @throws Exception
+   */
+  @Test
+  public void testPerform() throws Exception {
+    final StormShutdownWarning shutdownWarning = new StormShutdownWarning();
+
+    PrereqCheckRequest request = new PrereqCheckRequest(m_clusterName);
+    PrerequisiteCheck check =
+      new PrerequisiteCheck(null, null);
+
+    shutdownWarning.perform(check, request);
+    Assert.assertEquals(PrereqCheckStatus.WARNING, check.getStatus());
+  }
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
new file mode 100644
index 00000000000..5de4b2a4df5
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheckTest.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.inject.Provider;
+
+/**
+ * Tests for {@link YarnTimelineServerStatePreservingCheck}.
+ *
+ * @deprecated
+ */
+@Ignore
+public class YarnTimelineServerStatePreservingCheckTest {
+  private final Clusters m_clusters = Mockito.mock(Clusters.class);
+
+  private final YarnTimelineServerStatePreservingCheck m_check = new YarnTimelineServerStatePreservingCheck();
+
+  final RepositoryVersionEntity m_repositoryVersion = Mockito.mock(RepositoryVersionEntity.class);
+
+  @Before
+  public void setup() throws Exception {
+    m_check.clustersProvider = new Provider<Clusters>() {
+
+      @Override
+      public Clusters get() {
+        return m_clusters;
+      }
+    };
+    Configuration config = Mockito.mock(Configuration.class);
+    m_check.config = config;
+
+    Mockito.when(m_repositoryVersion.getVersion()).thenReturn("2.3.0.0-1234");
+    Mockito.when(m_repositoryVersion.getStackId()).thenReturn(new StackId("HDP", "2.3"));
+  }
+
+  /**
+   * @throws Exception
+   */
+  @Test
+  public void testIsApplicable() throws Exception {
+    final Cluster cluster = Mockito.mock(Cluster.class);
+    Mockito.when(cluster.getClusterId()).thenReturn(1L);
+    Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster);
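+    // Stub the current stack so the min-applicable-stack-version check property has a version to compare against.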
Mockito.when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.2")); + + Map services = new HashMap<>(); + Mockito.when(cluster.getServices()).thenReturn(services); + + RepositoryVersionEntity repositoryVersionEntity = Mockito.mock(RepositoryVersionEntity.class); + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.4.2"); + + Map checkProperties = new HashMap<>(); + checkProperties.put("min-applicable-stack-version","HDP-2.2.4.2"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + m_check.getClass().getName())).thenReturn(checkProperties); + + PrereqCheckRequest request = new PrereqCheckRequest("cluster"); + request.setTargetRepositoryVersion(m_repositoryVersion); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + + // YARN not installed + Assert.assertFalse(m_check.isApplicable(request)); + + // YARN installed + services.put("YARN", Mockito.mock(Service.class)); + Assert.assertTrue(m_check.isApplicable(request)); + + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.0.0"); + Assert.assertFalse(m_check.isApplicable(request)); + + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.4.2"); + Assert.assertTrue(m_check.isApplicable(request)); + } + + @Test + public void testPerform() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(m_clusters.getCluster("cluster")).thenReturn(cluster); + + final DesiredConfig desiredConfig = Mockito.mock(DesiredConfig.class); + Mockito.when(desiredConfig.getTag()).thenReturn("tag"); + Map configMap = new HashMap<>(); + configMap.put("yarn-site", desiredConfig); + configMap.put("core-site", desiredConfig); + + Mockito.when(cluster.getDesiredConfigs()).thenReturn(configMap); + final Config config = Mockito.mock(Config.class); + Mockito.when(cluster.getConfig(Mockito.anyString(), Mockito.anyString())).thenReturn(config); + final Map properties = new HashMap<>(); + Mockito.when(config.getProperties()).thenReturn(properties); + + PrerequisiteCheck check = new PrerequisiteCheck(null, null); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus()); + + properties.put("yarn.timeline-service.recovery.enabled", "true"); + check = new PrerequisiteCheck(null, null); + m_check.perform(check, new PrereqCheckRequest("cluster")); + Assert.assertEquals(PrereqCheckStatus.PASS, check.getStatus()); + } + + @SuppressWarnings("serial") + @Test + public void testIsApplicableMinimumHDPStackVersion() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(cluster.getServices()).thenReturn(new HashMap() { + { + put("YARN", null); + } + }); + Mockito.when(cluster.getCurrentStackVersion()).thenReturn(new StackId("HDP-2.2")); + RepositoryVersionEntity repositoryVersionEntity = Mockito.mock(RepositoryVersionEntity.class); + Mockito.when(m_clusters.getCluster("c1")).thenReturn(cluster); + + Map checkProperties = new HashMap<>(); + checkProperties.put("min-applicable-stack-version","HDP-2.2.4.2"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + m_check.getClass().getName())).thenReturn(checkProperties); + + PrereqCheckRequest request = new 
PrereqCheckRequest("c1"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + + // Check < 2.2.4.2 + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.1.1.1"); + boolean isApplicable = m_check.isApplicable(request); + Assert.assertFalse(isApplicable); + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.4.1"); + isApplicable = m_check.isApplicable(request); + Assert.assertFalse(isApplicable); + + // Check == 2.2.4.2 + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.4.2"); + isApplicable = m_check.isApplicable(request); + Assert.assertTrue(isApplicable); + + // Check > 2.2.4.2 + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.2.4.4"); + isApplicable = m_check.isApplicable(request); + Assert.assertTrue(isApplicable); + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.3.1.1"); + isApplicable = m_check.isApplicable(request); + Assert.assertTrue(isApplicable); + } + + @SuppressWarnings("serial") + @Test + public void testIsApplicableMinimumStackVersion() throws Exception { + final Cluster cluster = Mockito.mock(Cluster.class); + Mockito.when(cluster.getClusterId()).thenReturn(1L); + Mockito.when(cluster.getServices()).thenReturn(new HashMap() { + { + put("YARN", null); + } + }); + Mockito.when(cluster.getCurrentStackVersion()).thenReturn(new StackId("MYSTACK-12.2")); + RepositoryVersionEntity repositoryVersionEntity = Mockito.mock(RepositoryVersionEntity.class); + Mockito.when(m_clusters.getCluster("c1")).thenReturn(cluster); + + Map checkProperties = new HashMap<>(); + checkProperties.put("min-applicable-stack-version", "HDP-2.2.4.2"); + PrerequisiteCheckConfig prerequisiteCheckConfig = Mockito.mock(PrerequisiteCheckConfig.class); + Mockito.when(prerequisiteCheckConfig.getCheckProperties( + m_check.getClass().getName())).thenReturn(checkProperties); + + PrereqCheckRequest request = new PrereqCheckRequest("c1"); + request.setPrerequisiteCheckConfig(prerequisiteCheckConfig); + + Mockito.when(repositoryVersionEntity.getVersion()).thenReturn("2.3.0.1"); + boolean isApplicable = m_check.isApplicable(request); + Assert.assertTrue(isApplicable); + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java index 44befba1fc8..f0fff2b19d7 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java @@ -148,10 +148,10 @@ public void testServiceComponentInstalled() Cluster cluster = heartbeatTestHelper.getDummyCluster(); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); // Get the recovery configuration @@ -159,7 +159,7 @@ public void testServiceComponentInstalled() assertEquals(recoveryConfig.getEnabledComponents(), "DATANODE"); // Install HDFS::NAMENODE to trigger a component installed event - 
hdfs.addServiceComponent(NAMENODE, NAMENODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); // Verify that the config is stale now @@ -183,13 +183,13 @@ public void testServiceComponentUninstalled() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); // Get the recovery configuration @@ -220,10 +220,10 @@ public void testClusterEnvConfigChanged() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setDesiredState(State.INSTALLED); @@ -261,13 +261,13 @@ public void testMaintenanceModeChanged() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); - hdfs.addServiceComponent(NAMENODE, NAMENODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(NAMENODE).setRecoveryEnabled(true); hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1); // Get the recovery configuration @@ -297,10 +297,10 @@ public void testServiceComponentRecoveryChanged() throws Exception { Cluster cluster = heartbeatTestHelper.getDummyCluster(); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, 
DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1); // Get the recovery configuration @@ -341,10 +341,10 @@ public void testMultiNodeCluster() RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster); // Add HDFS service with DATANODE component to the cluster - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0"); Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion); - hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true); + hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true); // Add SCH to Host1 and Host2 hdfs.getServiceComponent(DATANODE).addServiceComponentHost("Host1"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java index 0dbfc08fed4..8fb34c00296 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java @@ -56,6 +56,7 @@ import org.apache.ambari.server.orm.entities.RepoOsEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; +import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity; import org.apache.ambari.server.orm.entities.StackEntity; import org.apache.ambari.server.security.TestAuthenticationFactory; import org.apache.ambari.server.security.authorization.AuthorizationException; @@ -580,8 +581,8 @@ public void testServiceCheckRunsOnDependentClientService() throws Exception { OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class); RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(new StackId("HDP-2.0.6"), "2.0.6-1234"); createService("c1", "CORE", "HADOOP_CLIENTS", repositoryVersion); - createServiceComponent("c1", "CORE", "HADOOP_CLIENTS", "SOME_CLIENT_FOR_SERVICE_CHECK", "SOME_CLIENT_FOR_SERVICE_CHECK", State.INIT); - createServiceComponentHost("c1", "CORE", "HADOOP_CLIENTS", 1L, "SOME_CLIENT_FOR_SERVICE_CHECK", "SOME_CLIENT_FOR_SERVICE_CHECK", "c1-c6403", State.INIT); + createServiceComponent("c1", "CORE", "HADOOP_CLIENTS", "SOME_CLIENT_FOR_SERVICE_CHECK", State.INIT); + createServiceComponentHost("c1", "CORE", "HADOOP_CLIENTS", "SOME_CLIENT_FOR_SERVICE_CHECK", "c1-c6403", State.INIT); //make sure there are no HDFS_CLIENT components from HDFS service Cluster c1 = clusters.getCluster("c1"); @@ -744,9 +745,14 @@ public void testCommandRepository() throws Exception { // add a repo version associated with a component ServiceComponentDesiredStateEntity componentEntity = componentDAO.findByName(cluster.getClusterId(), serviceYARN.getServiceGroupId(), - serviceYARN.getServiceId(), componentRM.getName(), componentRM.getType()); + serviceYARN.getServiceId(), componentRM.getName()); + + ServiceComponentVersionEntity componentVersionEntity = new ServiceComponentVersionEntity(); + componentVersionEntity.setRepositoryVersion(repositoryVersion); + componentVersionEntity.setUserName("admin"); 
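+    // The version entity ties the component to the repository version created above before the merge.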
componentEntity.setDesiredRepositoryVersion(repositoryVersion); + componentEntity.addVersion(componentVersionEntity); componentDAO.merge(componentEntity); // !!! make sure the override is set @@ -788,30 +794,29 @@ private void createClusterFixture(String clusterName, StackId stackId, createService(clusterName, serviceGroupName, "ZOOKEEPER", repositoryVersion); createService(clusterName, serviceGroupName, "FLUME", repositoryVersion); - createServiceComponent(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", "RESOURCEMANAGER", State.INIT); - createServiceComponent(clusterName, serviceGroupName, "YARN", "NODEMANAGER", "NODEMANAGER", State.INIT); - createServiceComponent(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_SERVER", "GANGLIA_SERVER", State.INIT); - createServiceComponent(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_MONITOR", "GANGLIA_MONITOR", State.INIT); - createServiceComponent(clusterName, serviceGroupName, "ZOOKEEPER", "ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "YARN", "NODEMANAGER", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_SERVER", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_MONITOR", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "ZOOKEEPER", "ZOOKEEPER_CLIENT", State.INIT); // this component should be not installed on any host - createServiceComponent(clusterName, serviceGroupName, "FLUME", "FLUME_HANDLER", "FLUME_HANDLER", State.INIT); + createServiceComponent(clusterName, serviceGroupName, "FLUME", "FLUME_HANDLER", State.INIT); } private void createServiceComponentHosts(String clusterName, String serviceGroupName, String hostPrefix) throws AmbariException, AuthorizationException { String hostC6401 = hostPrefix + "-c6401"; String hostC6402 = hostPrefix + "-c6402"; - // TODO : Numbers for component Id may not be correct. 
- createServiceComponentHost(clusterName, serviceGroupName, "YARN", 1L, "RESOURCEMANAGER", "RESOURCEMANAGER", hostC6401, null); - createServiceComponentHost(clusterName, serviceGroupName, "YARN", 2L, "NODEMANAGER", "NODEMANAGER", hostC6401, null); - createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", 3L, "GANGLIA_SERVER", "GANGLIA_SERVER", hostC6401, State.INIT); - createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", 4L, "GANGLIA_MONITOR", "GANGLIA_MONITOR", hostC6401, State.INIT); - createServiceComponentHost(clusterName, serviceGroupName, "ZOOKEEPER", 5L, "ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT", hostC6401, State.INIT); - - createServiceComponentHost(clusterName, serviceGroupName, "YARN", 6L,"NODEMANAGER", "NODEMANAGER", hostC6402, null); - createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", 7L,"GANGLIA_MONITOR", "GANGLIA_MONITOR", hostC6402, State.INIT); - createServiceComponentHost(clusterName, serviceGroupName, "ZOOKEEPER", 8L, "ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT", hostC6402, State.INIT); + createServiceComponentHost(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", hostC6401, null); + createServiceComponentHost(clusterName, serviceGroupName, "YARN", "NODEMANAGER", hostC6401, null); + createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_SERVER", hostC6401, State.INIT); + createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_MONITOR", hostC6401, State.INIT); + createServiceComponentHost(clusterName, serviceGroupName, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostC6401, State.INIT); + + createServiceComponentHost(clusterName, serviceGroupName, "YARN", "NODEMANAGER", hostC6402, null); + createServiceComponentHost(clusterName, serviceGroupName, "GANGLIA", "GANGLIA_MONITOR", hostC6402, State.INIT); + createServiceComponentHost(clusterName, serviceGroupName, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostC6402, State.INIT); } private void addHost(String hostname, String clusterName) throws AmbariException { clusters.addHost(hostname); @@ -844,17 +849,16 @@ private void createService( } private void createServiceComponent( - String clusterName, String serviceGroupName, String serviceName, String componentName, String componentType, State desiredState + String clusterName, String serviceGroupName, String serviceName, String componentName, State desiredState ) throws AmbariException, AuthorizationException { - ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, componentType, desiredState != null ? desiredState.name() : null); + ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, desiredState != null ? desiredState.name() : null); ComponentResourceProviderTest.createComponents(ambariManagementController, Collections.singleton(r)); } private void createServiceComponentHost( - String clusterName, String serviceGroupName, String serviceName, Long componentId, String componentName, String componentType, String hostname, State desiredState + String clusterName, String serviceGroupName, String serviceName, String componentName, String hostname, State desiredState ) throws AmbariException, AuthorizationException { - ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentId, componentName, componentType, - hostname, desiredState != null ? 
desiredState.name() : null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, hostname, desiredState != null ? desiredState.name() : null); ambariManagementController.createHostComponents(Collections.singleton(r)); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java index 53e1a104edd..4b996e573b2 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java @@ -24,6 +24,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME; import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION; + import static org.easymock.EasyMock.anyBoolean; import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.capture; @@ -58,6 +59,7 @@ import javax.persistence.RollbackException; + import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ClusterNotFoundException; import org.apache.ambari.server.HostNotFoundException; @@ -65,6 +67,7 @@ import org.apache.ambari.server.ServiceComponentHostNotFoundException; import org.apache.ambari.server.ServiceComponentNotFoundException; import org.apache.ambari.server.ServiceNotFoundException; + import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl; import org.apache.ambari.server.actionmanager.ActionManager; import org.apache.ambari.server.agent.HeartBeatHandler; @@ -73,13 +76,9 @@ import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.controller.internal.RequestStageContainer; import org.apache.ambari.server.orm.InMemoryDefaultTestModule; -import org.apache.ambari.server.orm.dao.HostComponentStateDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; -import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO; -import org.apache.ambari.server.orm.entities.HostComponentStateEntity; import org.apache.ambari.server.orm.entities.LdapSyncSpecEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; -import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; import org.apache.ambari.server.registry.RegistryManager; import org.apache.ambari.server.security.authorization.Users; import org.apache.ambari.server.security.authorization.internal.InternalAuthenticationToken; @@ -108,6 +107,7 @@ import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.state.State; import org.apache.ambari.server.state.stack.OsFamily; + import org.easymock.Capture; import org.easymock.EasyMock; import org.junit.Before; @@ -140,10 +140,6 @@ public class AmbariManagementControllerImplTest { private static final Users users = createMock(Users.class); private static final AmbariSessionManager sessionManager = createNiceMock(AmbariSessionManager.class); private static final RegistryManager registryManager = createNiceMock(RegistryManager.class); - private static final HostComponentStateEntity hostComponentStateEntity = createMock(HostComponentStateEntity.class); - private static final HostComponentStateDAO hostComponentStateDAO = 
createMock(HostComponentStateDAO.class); - private static final ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = createMock(ServiceComponentDesiredStateEntity.class); - private static final ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class); @BeforeClass public static void setupAuthentication() { @@ -155,8 +151,7 @@ public static void setupAuthentication() { @Before public void before() throws Exception { - reset(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, sessionManager, - hostComponentStateEntity, hostComponentStateDAO, serviceComponentDesiredStateEntity, serviceComponentDesiredStateDAO); + reset(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, sessionManager); } @Test @@ -171,9 +166,6 @@ public void testgetAmbariServerURI() throws Exception { expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - //replay replay(injector); @@ -259,11 +251,8 @@ public void testGetClusters() throws Exception { CredentialStoreService credentialStoreService = createNiceMock(CredentialStoreService.class); expect(credentialStoreService.isInitialized(anyObject(CredentialStoreType.class))).andReturn(true).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(injector, clusters, cluster, response, credentialStoreService, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(injector, clusters, cluster, response, credentialStoreService); // test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -278,7 +267,7 @@ public void testGetClusters() throws Exception { assertEquals(1, setResponses.size()); assertTrue(setResponses.contains(response)); - verify(injector, clusters, cluster, response, credentialStoreService, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, response, credentialStoreService); } /** @@ -306,11 +295,8 @@ public void testGetClusters___ClusterNotFoundException() throws Exception { // getCluster expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1")); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(injector, clusters, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(injector, clusters); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -323,7 +309,7 @@ public void testGetClusters___ClusterNotFoundException() throws Exception { // expected } - verify(injector, clusters, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters); } /** @@ -372,12 +358,8 @@ public void testGetClusters___OR_Predicate_ClusterNotFoundException() throws Exc CredentialStoreService 
credentialStoreService = createNiceMock(CredentialStoreService.class); expect(credentialStoreService.isInitialized(anyObject(CredentialStoreType.class))).andReturn(true).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(injector, clusters, cluster, cluster2, response, response2, credentialStoreService, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(injector, clusters, cluster, cluster2, response, response2, credentialStoreService); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -394,8 +376,7 @@ public void testGetClusters___OR_Predicate_ClusterNotFoundException() throws Exc assertTrue(setResponses.contains(response)); assertTrue(setResponses.contains(response2)); - verify(injector, clusters, cluster, cluster2, response, response2, credentialStoreService, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, cluster2, response, response2, credentialStoreService); } /** @@ -432,9 +413,6 @@ public void testUpdateClusters() throws Exception { expect(clusters.getClusterById(1L)).andReturn(cluster).times(1); expect(cluster.getClusterName()).andReturn("clusterOld").times(1); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - cluster.setClusterName("clusterNew"); expectLastCall(); @@ -442,8 +420,7 @@ expectLastCall(); // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -451,8 +428,7 @@ // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest); } /** @@ -485,9 +461,6 @@ public void testUpdateClustersWithNullConfigPropertyValues() throws Exception { expect(clusterRequest.getClusterName()).andReturn("clusterNew").anyTimes(); expect(clusterRequest.getClusterId()).andReturn(1L).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - ConfigurationRequest configReq = new ConfigurationRequest(); final Map<String, String> configReqProps = Maps.newHashMap(); configReqProps.put("p1", null); @@ -510,8 +483,7 @@ expectLastCall(); // replay mocks - replay(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager, - hostComponentStateDAO, serviceComponentDesiredStateDAO); +
replay(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -519,8 +491,7 @@ public void testUpdateClustersWithNullConfigPropertyValues() throws Exception { // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, config, injector, clusterRequest, sessionManager); } /** @@ -549,12 +520,8 @@ public void testUpdateClustersToggleKerberosNotInvoked() throws Exception { expect(clusters.getClusterById(1L)).andReturn(cluster).times(1); expect(cluster.getClusterName()).andReturn("cluster").times(1); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -562,8 +529,7 @@ public void testUpdateClustersToggleKerberosNotInvoked() throws Exception { // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); } /** @@ -595,9 +561,6 @@ public void testUpdateClustersToggleKerberosReenable() throws Exception { expect(cluster.getClusterName()).andReturn("cluster").times(1); expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(kerberosHelper.shouldExecuteCustomOperations(SecurityType.KERBEROS, null)) .andReturn(false) .once(); @@ -607,8 +570,7 @@ public void testUpdateClustersToggleKerberosReenable() throws Exception { // Note: kerberosHelper.toggleKerberos is not called // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -616,8 +578,7 @@ public void testUpdateClustersToggleKerberosReenable() throws Exception { // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); } /** * Ensure that when the cluster security type updated from NONE 
to KERBEROS, KerberosHandler.toggleKerberos @@ -647,9 +608,6 @@ public void testUpdateClustersToggleKerberosEnable() throws Exception { expect(cluster.getClusterName()).andReturn("cluster").times(1); expect(cluster.getSecurityType()).andReturn(SecurityType.NONE).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(kerberosHelper.shouldExecuteCustomOperations(SecurityType.KERBEROS, null)) .andReturn(false) .once(); @@ -663,12 +621,8 @@ public void testUpdateClustersToggleKerberosEnable() throws Exception { .andReturn(null) .once(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -676,8 +630,7 @@ public void testUpdateClustersToggleKerberosEnable() throws Exception { // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); } /** @@ -736,9 +689,6 @@ private void testUpdateClustersToggleKerberosDisable(Boolean manageIdentities) t expect(cluster.getClusterName()).andReturn("cluster").times(1); expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(kerberosHelper.shouldExecuteCustomOperations(SecurityType.NONE, null)) .andReturn(false) .once(); @@ -815,12 +765,8 @@ public void testUpdateClustersToggleKerberos_Fail() throws Exception { .andThrow(new IllegalArgumentException("bad args!")) .once(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -834,8 +780,7 @@ public void testUpdateClustersToggleKerberos_Fail() throws Exception { // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, 
clusterRequest, sessionManager, kerberosHelper); } /** @@ -863,15 +808,11 @@ public void testUpdateClusters__RollbackException() throws Exception { expect(clusterRequest.getClusterId()).andReturn(1L).times(4); expect(clusters.getClusterById(1L)).andReturn(cluster).times(1); expect(cluster.getClusterName()).andReturn("clusterOld").times(1); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - cluster.setClusterName("clusterNew"); expectLastCall().andThrow(new RollbackException()); // replay mocks - replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager); // test AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector); @@ -883,8 +824,7 @@ } // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager); } @Test @@ -906,7 +846,7 @@ public void testGetHostComponents() throws Exception { // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); setRequests.add(request1); @@ -919,19 +859,6 @@ expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper).anyTimes(); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).times(2); - // getHostComponent expect(clusters.getCluster("cluster1")).andReturn(cluster); expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)); @@ -944,7 +871,7 @@ expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getName()).andReturn("service1").anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component); -
expect(component.getId()).andReturn(1L).times(2); + expect(component.getName()).andReturn("component1"); expect(component.getServiceComponentHosts()).andReturn( new HashMap<String, ServiceComponentHost>() {{ put("host1", componentHost); @@ -955,8 +882,7 @@ // replay mocks replay(maintHelper, injector, clusters, cluster, host, response, stack, - ambariMetaInfo, service, component, componentHost, hostComponentStateDAO, hostComponentStateEntity, - serviceComponentDesiredStateDAO, serviceComponentDesiredStateEntity); + ambariMetaInfo, service, component, componentHost); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -969,9 +895,7 @@ assertEquals(1, setResponses.size()); assertTrue(setResponses.contains(response)); - verify(injector, clusters, cluster, host, response, stack, ambariMetaInfo, service, component, componentHost, - hostComponentStateDAO, hostComponentStateEntity, serviceComponentDesiredStateDAO, - serviceComponentDesiredStateEntity); + verify(injector, clusters, cluster, host, response, stack, ambariMetaInfo, service, component, componentHost); } @Test @@ -989,7 +913,7 @@ public void testGetHostComponents___ServiceComponentHostNotFoundException() thro // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1014,26 +938,12 @@ expect(cluster.getService("service1")).andReturn(service); expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component); - expect(component.getId()).andReturn(1L).anyTimes(); + expect(component.getName()).andReturn("component1").anyTimes(); expect(component.getServiceComponentHosts()).andReturn(null); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).anyTimes(); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, - service, component, hostComponentStateDAO, hostComponentStateEntity, serviceComponentDesiredStateDAO, - serviceComponentDesiredStateEntity); + service, component); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1048,9 +958,7 @@ public void testGetHostComponents___ServiceComponentHostNotFoundException() thro // assert and verify
assertSame(controller, controllerCapture.getValue()); - verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, - hostComponentStateDAO, hostComponentStateEntity, serviceComponentDesiredStateDAO, - serviceComponentDesiredStateEntity); + verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component); } @Test @@ -1070,7 +978,7 @@ public void testGetHostComponents___ServiceComponentHostFilteredByState() throws // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); request1.setState("INSTALLED"); @@ -1114,12 +1022,9 @@ public void testGetHostComponents___ServiceComponentHostFilteredByState() throws expect(componentHost1.convertToResponse(null)).andReturn(response1); expect(componentHost1.getHostName()).andReturn("host1"); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, - service, component, componentHost1, response1, hostComponentStateDAO, serviceComponentDesiredStateDAO); + service, component, componentHost1, response1); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1130,8 +1035,7 @@ public void testGetHostComponents___ServiceComponentHostFilteredByState() throws // assert and verify assertSame(controller, controllerCapture.getValue()); assertTrue(responses.size() == 1); - verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, response1, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, response1); } @Test @@ -1151,7 +1055,7 @@ public void testGetHostComponents___ServiceComponentHostFilteredByMaintenanceSta // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); request1.setMaintenanceState("ON"); @@ -1189,12 +1093,9 @@ public void testGetHostComponents___ServiceComponentHostFilteredByMaintenanceSta expect(componentHost1.convertToResponse(null)).andReturn(response1); expect(componentHost1.getHostName()).andReturn("host1"); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, - service, component, componentHost1, response1, hostComponentStateDAO, serviceComponentDesiredStateDAO); + service, component, componentHost1, response1); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1205,8 +1106,7 @@ public void testGetHostComponents___ServiceComponentHostFilteredByMaintenanceSta // assert and verify assertSame(controller, controllerCapture.getValue()); assertTrue(responses.size() == 1); - verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, 
response1, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, componentHost1, response1); } @Test @@ -1239,13 +1139,13 @@ public void testGetHostComponents___OR_Predicate_ServiceComponentHostNotFoundExc // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 2L, "component2", "component2","host1", null); + "cluster1", "CORE", "service1", "component2", "host1", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 3L, "component3", "component3", "host1", null); + "cluster1", "CORE", "service1", "component3", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1269,6 +1169,7 @@ expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component1); expect(service.getName()).andReturn("service1").anyTimes(); + expect(component1.getName()).andReturn("component1"); expect(component1.getServiceComponentHosts()).andReturn( new HashMap<String, ServiceComponentHost>() {{ put("host1", componentHost1); @@ -1278,60 +1179,23 @@ expect(cluster.getServiceByComponentName("component2")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component2")).andReturn(component2); + expect(component2.getName()).andReturn("component2"); expect(component2.getServiceComponentHosts()).andReturn(null); expect(componentHost2.getHostName()).andReturn("host1"); expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component3")).andReturn(component3); + expect(component3.getName()).andReturn("component3"); expect(component3.getServiceComponentHosts()).andReturn( new HashMap<String, ServiceComponentHost>() {{ put("host1", componentHost2); }}); expect(componentHost2.convertToResponse(null)).andReturn(response2); - HostComponentStateEntity hostComponentStateEntity2 = createNiceMock(HostComponentStateEntity.class); - HostComponentStateEntity hostComponentStateEntity3 = createNiceMock(HostComponentStateEntity.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateDAO.findById(2L)).andReturn(hostComponentStateEntity2).anyTimes(); - expect(hostComponentStateDAO.findById(3L)).andReturn(hostComponentStateEntity3).anyTimes(); - - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(hostComponentStateEntity2.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceGroupId()).andReturn(1L).anyTimes(); -
expect(hostComponentStateEntity2.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getComponentName()).andReturn("component2").anyTimes(); - expect(hostComponentStateEntity2.getComponentType()).andReturn("component2").anyTimes(); - - expect(hostComponentStateEntity3.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getComponentName()).andReturn("component3").anyTimes(); - expect(hostComponentStateEntity3.getComponentType()).andReturn("component3").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity2 = createMock(ServiceComponentDesiredStateEntity.class); - - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component2", "component2")).andReturn(serviceComponentDesiredStateEntity2).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity3 = createMock(ServiceComponentDesiredStateEntity.class); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component3", "component3")).andReturn(serviceComponentDesiredStateEntity3).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).times(2); - // replay mocks replay(stateHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, service, component1, component2, component3, componentHost1, - componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost2, response1, response2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1346,8 +1210,7 @@ assertTrue(setResponses.contains(response2)); verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component1, component2, component3, - componentHost1, componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost1, componentHost2, response1, response2); } @Test @@ -1375,13 +1238,13 @@ public void testGetHostComponents___OR_Predicate_ServiceNotFoundException() thro // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service2", 2L, "component2", "component2", "host1", null); + "cluster1", "CORE", "service2", "component2", "host1", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 3L, "component3", "component3", "host1", null); + "cluster1", "CORE", "service1", "component3", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1411,6 +1274,7 @@
expect(cluster.getService("service1")).andReturn(service); expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component1); + expect(component1.getName()).andReturn("component1"); expect(component1.getServiceComponentHosts()).andReturn(new HashMap<String, ServiceComponentHost>() {{ put("host1", componentHost1); @@ -1424,6 +1288,7 @@ expect(service.getName()).andReturn("service1").anyTimes(); expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component3")).andReturn(component3); + expect(component3.getName()).andReturn("component3"); expect(component3.getServiceComponentHosts()).andReturn(new HashMap<String, ServiceComponentHost>() {{ put("host1", componentHost2); @@ -1431,49 +1296,10 @@ expect(componentHost2.convertToResponse(null)).andReturn(response2); expect(componentHost2.getHostName()).andReturn("host1"); - HostComponentStateEntity hostComponentStateEntity2 = createNiceMock(HostComponentStateEntity.class); - HostComponentStateEntity hostComponentStateEntity3 = createNiceMock(HostComponentStateEntity.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateDAO.findById(2L)).andReturn(hostComponentStateEntity2).anyTimes(); - expect(hostComponentStateDAO.findById(3L)).andReturn(hostComponentStateEntity3).anyTimes(); - - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(hostComponentStateEntity2.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getComponentName()).andReturn("component2").anyTimes(); - expect(hostComponentStateEntity2.getComponentType()).andReturn("component2").anyTimes(); - - expect(hostComponentStateEntity3.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getComponentName()).andReturn("component3").anyTimes(); - expect(hostComponentStateEntity3.getComponentType()).andReturn("component3").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity2 = createMock(ServiceComponentDesiredStateEntity.class); - - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component2", "component2")).andReturn(serviceComponentDesiredStateEntity2).anyTimes(); - ServiceComponentDesiredStateEntity
serviceComponentDesiredStateEntity3 = createMock(ServiceComponentDesiredStateEntity.class); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component3", "component3")).andReturn(serviceComponentDesiredStateEntity3).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).times(2); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, service, component1, component2, component3, componentHost1, - componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost2, response1, response2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1488,8 +1314,7 @@ public void testGetHostComponents___OR_Predicate_ServiceNotFoundException() thro assertTrue(setResponses.contains(response2)); verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component1, component2, component3, - componentHost1, componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost1, componentHost2, response1, response2); } @Test @@ -1519,13 +1344,13 @@ public void testGetHostComponents___OR_Predicate_ServiceComponentNotFoundExcepti // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service2", 2L, "component2", "component2", "host1", null); + "cluster1", "CORE", "service2", "component2", "host1", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 3L, "component3", "component3", "host1", null); + "cluster1", "CORE", "service1", "component3", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1555,6 +1380,7 @@ expect(cluster.getService("service1")).andReturn(service); expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component); + expect(component.getName()).andReturn("component1"); expect(component.getServiceComponentHosts()).andReturn(ImmutableMap.<String, ServiceComponentHost>builder() .put("host1", componentHost1) .build()); @@ -1572,55 +1398,17 @@ expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component3")).andReturn(component3); + expect(component3.getName()).andReturn("component3"); expect(component3.getServiceComponentHosts()).andReturn(ImmutableMap.<String, ServiceComponentHost>builder() .put("host1", componentHost2) .build()); expect(componentHost2.convertToResponse(null)).andReturn(response2); expect(componentHost2.getHostName()).andReturn("host1"); - HostComponentStateEntity hostComponentStateEntity2 = createNiceMock(HostComponentStateEntity.class); - HostComponentStateEntity hostComponentStateEntity3 = createNiceMock(HostComponentStateEntity.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); -
expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateDAO.findById(2L)).andReturn(hostComponentStateEntity2).anyTimes(); - expect(hostComponentStateDAO.findById(3L)).andReturn(hostComponentStateEntity3).anyTimes(); - - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(hostComponentStateEntity2.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getComponentName()).andReturn("component2").anyTimes(); - expect(hostComponentStateEntity2.getComponentType()).andReturn("component2").anyTimes(); - - expect(hostComponentStateEntity3.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getComponentName()).andReturn("component3").anyTimes(); - expect(hostComponentStateEntity3.getComponentType()).andReturn("component3").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity2 = createMock(ServiceComponentDesiredStateEntity.class); - - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component2", "component2")).andReturn(serviceComponentDesiredStateEntity2).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity3 = createMock(ServiceComponentDesiredStateEntity.class); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component3", "component3")).andReturn(serviceComponentDesiredStateEntity3).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).times(2); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, service, service2, component, component2, component3, componentHost1, - componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost2, response1, response2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1635,8 +1423,7 @@ public void testGetHostComponents___OR_Predicate_ServiceComponentNotFoundExcepti assertTrue(setResponses.contains(response2)); verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, service2, component, component2, component3, - componentHost1, componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost1, componentHost2, response1, response2); } @Test @@ -1665,13 +1452,13 @@ public void 
testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvi // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 1L, "component1", "component1", null, null); + "cluster1", "CORE", "service1", "component1", null, null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 2L, "component2", "component2", "host2", null); + "cluster1", "CORE", "service1", "component2", "host2", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", 3L, "component3", "component3", null, null); + "cluster1", "CORE", "service1", "component3", null, null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1701,6 +1488,7 @@ expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component1")).andReturn(component); expect(service.getName()).andReturn("service1").anyTimes(); + expect(component.getName()).andReturn("component1"); expect(component.getServiceComponentHosts()).andReturn(Collections.singletonMap("foo", componentHost1)); expect(componentHost1.convertToResponse(null)).andReturn(response1); expect(componentHost1.getHostName()).andReturn("host1"); @@ -1710,53 +1498,15 @@ public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvi expect(cluster.getService("service1")).andReturn(service); expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes(); expect(service.getServiceComponent("component3")).andReturn(component3); + expect(component3.getName()).andReturn("component3"); expect(component3.getServiceComponentHosts()).andReturn(Collections.singletonMap("foo", componentHost2)); expect(componentHost2.convertToResponse(null)).andReturn(response2); expect(componentHost2.getHostName()).andReturn("host1"); - HostComponentStateEntity hostComponentStateEntity2 = createNiceMock(HostComponentStateEntity.class); - HostComponentStateEntity hostComponentStateEntity3 = createNiceMock(HostComponentStateEntity.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes(); - expect(hostComponentStateDAO.findById(2L)).andReturn(hostComponentStateEntity2).anyTimes(); - expect(hostComponentStateDAO.findById(3L)).andReturn(hostComponentStateEntity3).anyTimes(); - - expect(hostComponentStateEntity.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity.getComponentName()).andReturn("component1").anyTimes(); - expect(hostComponentStateEntity.getComponentType()).andReturn("component1").anyTimes(); - - expect(hostComponentStateEntity2.getClusterId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity2.getComponentName()).andReturn("component2").anyTimes(); - expect(hostComponentStateEntity2.getComponentType()).andReturn("component2").anyTimes(); - - expect(hostComponentStateEntity3.getClusterId()).andReturn(1L).anyTimes(); -
expect(hostComponentStateEntity3.getServiceGroupId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getServiceId()).andReturn(1L).anyTimes(); - expect(hostComponentStateEntity3.getComponentName()).andReturn("component3").anyTimes(); - expect(hostComponentStateEntity3.getComponentType()).andReturn("component3").anyTimes(); - - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity2 = createMock(ServiceComponentDesiredStateEntity.class); - - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component2", "component2")).andReturn(serviceComponentDesiredStateEntity2).anyTimes(); - ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity3 = createMock(ServiceComponentDesiredStateEntity.class); - expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L, - "component3", "component3")).andReturn(serviceComponentDesiredStateEntity3).anyTimes(); - expect(serviceComponentDesiredStateEntity.getId()).andReturn(1L).times(2); - // replay mocks replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo, service, service2, component, component2, component3, componentHost1, - componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost2, response1, response2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1772,8 +1522,7 @@ public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvi assertTrue(setResponses.contains(response2)); verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, service2, component, component2, component3, - componentHost1, componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, - hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3); + componentHost1, componentHost2, response1, response2); } @Test @@ -1788,13 +1537,13 @@ public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvi // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component2", "component2", "host1", null); + "cluster1", "CORE", "service1", "component2", "host1", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component3", "component3", "host1", null); + "cluster1", "CORE", "service1", "component3", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1809,15 +1558,12 @@ expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); -
expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // getHostComponent expect(clusters.getCluster("cluster1")).andReturn(cluster); expect(clusters.getClustersForHost("host1")).andThrow(new HostNotFoundException("host1")); // replay mocks - replay(maintHelper, injector, clusters, cluster, stack, ambariMetaInfo, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(maintHelper, injector, clusters, cluster, stack, ambariMetaInfo); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1833,7 +1579,7 @@ public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvi // assert and verify assertSame(controller, controllerCapture.getValue()); - verify(injector, clusters, cluster, stack, ambariMetaInfo, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, stack, ambariMetaInfo); } @Test @@ -1846,13 +1592,13 @@ public void testGetHostComponents___OR_Predicate_ClusterNotFoundException() thro // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component1", "component1", "host1", null); + "cluster1", "CORE", "service1", "component1", "host1", null); ServiceComponentHostRequest request2 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component2", "component2", "host2", null); + "cluster1", "CORE", "service1", "component2", "host2", null); ServiceComponentHostRequest request3 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component3", "component3", "host1", null); + "cluster1", "CORE", "service1", "component3", "host1", null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1870,9 +1616,6 @@ // getHostComponent expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1")); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks replay(maintHelper, injector, clusters, stack, ambariMetaInfo); @@ -1914,7 +1657,7 @@ public void testGetHostComponents___NullHostName() throws Exception { // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", "service1", "component1", "component1", null, null); + "cluster1", "CORE", "service1", "component1", null, null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -1950,13 +1693,9 @@ expect(componentHost1.getHostName()).andReturn("host1"); expect(componentHost2.getHostName()).andReturn("host1"); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); -
expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks replay(maintHelper, injector, clusters, cluster, response1, response2, - stack, ambariMetaInfo, service, component, componentHost1, componentHost2, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + stack, ambariMetaInfo, service, component, componentHost1, componentHost2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -1970,8 +1709,7 @@ public void
testGetHostComponents___NullHostName() throws Exception { assertTrue(setResponses.contains(response1)); assertTrue(setResponses.contains(response2)); - verify(injector, clusters, cluster, response1, response2, stack, ambariMetaInfo, service, component, componentHost1, - componentHost2, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, response1, response2, stack, ambariMetaInfo, service, component, componentHost1, componentHost2); } @Test @@ -1999,7 +1737,7 @@ public void testGetHostComponents___NullHostName_NullComponentName() throws Exce // requests ServiceComponentHostRequest request1 = new ServiceComponentHostRequest( - "cluster1", "CORE", null, null, null, null, null); + "cluster1", "CORE", null, null, null, null); Set<ServiceComponentHostRequest> setRequests = new HashSet<>(); @@ -2042,16 +1780,13 @@ expect(componentHost2.getHostName()).andReturn("host1"); expect(componentHost3.getHostName()).andReturn("host1"); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(component2.getServiceComponentHosts()).andReturn(Collections.singletonMap("foobar", componentHost3)); expect(componentHost3.convertToResponse(null)).andReturn(response3); // replay mocks replay(maintHelper, injector, clusters, cluster, response1, response2, response3, stack, ambariMetaInfo, service1, service2, component1, component2, - componentHost1, componentHost2, componentHost3, hostComponentStateDAO, serviceComponentDesiredStateDAO); + componentHost1, componentHost2, componentHost3); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -2067,7 +1802,7 @@ public void testGetHostComponents___NullHostName_NullComponentName() throws Exce assertTrue(setResponses.contains(response3)); verify(injector, clusters, cluster, response1, response2, response3, stack, ambariMetaInfo, service1, service2, - component1, component2, componentHost1, componentHost2, componentHost3, hostComponentStateDAO, serviceComponentDesiredStateDAO); + component1, component2, componentHost1, componentHost2, componentHost3); } @Test @@ -2114,19 +1849,15 @@ public void testPopulatePackagesInfo() throws Exception { expect(serviceInfo.getOsSpecifics()).andReturn(osSpecificsService); expect(stackInfo.getOsSpecifics()).andReturn(osSpecificsStack); - injector.injectMembers(capture(controllerCapture)); expect(injector.getInstance(Gson.class)).andReturn(null); expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper).anyTimes(); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - OsFamily osFamilyMock = createNiceMock(OsFamily.class); EasyMock.expect(osFamilyMock.isVersionedOsFamilyExtendedByVersionedFamily("testOSFamily", "testOSFamily")).andReturn(true).times(3); - replay(maintHelper, injector, clusters, stackInfo, serviceInfo, osFamilyMock, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(maintHelper, injector, clusters, stackInfo, serviceInfo, osFamilyMock);
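[Editor's note] Every hunk in this stretch shrinks the replay(...) and verify(...) argument lists in lockstep with the deleted DAO and entity mocks. The symmetry is required, not cosmetic: in EasyMock's record/replay/verify lifecycle each mock must be switched to replay mode before the code under test touches it, and verify() fails when a recorded expectation was never consumed, so a mock dropped from the setup has to vanish from both calls. A self-contained sketch of that lifecycle, using a hypothetical collaborator rather than any Ambari type:

    import static org.easymock.EasyMock.*;

    interface Repo { String find(long id); }         // hypothetical collaborator

    Repo repo = createMock(Repo.class);
    expect(repo.find(1L)).andReturn("x").once();     // record an expectation
    replay(repo);                                    // switch the mock to replay mode
    String result = repo.find(1L);                   // exercise it; returns "x"
    verify(repo);                                    // fails if find(1L) was never called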
AmbariManagementControllerImplTest.NestedTestClass nestedTestClass = this.new NestedTestClass(null, clusters, injector, osFamilyMock); @@ -2198,10 +1929,13 @@ public void testCreateDefaultHostParams() throws Exception { replay(manager, clusters, cluster, injector, stackId, configuration, repositoryVersionEntity, configHelper); - AmbariManagementControllerImpl ambariManagementControllerImpl = createMockBuilder( - AmbariManagementControllerImpl.class).withConstructor(manager, clusters, - injector).createNiceMock(); + AmbariManagementControllerImpl ambariManagementControllerImpl = + createMockBuilder(AmbariManagementControllerImpl.class) + .addMockedMethod("getRcaParameters") + .withConstructor(manager, clusters, injector).createNiceMock(); + expect(ambariManagementControllerImpl. + getRcaParameters()).andReturn(new HashMap<>()); replay(ambariManagementControllerImpl); // Inject configuration manually @@ -2272,8 +2006,7 @@ public void testSynchronizeLdapUsersAndGroups() throws Exception { expectLastCall().anyTimes(); //replay - replay(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, ldapBatchDto, - hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, ldapBatchDto); AmbariManagementControllerImpl controller = injector.getInstance(AmbariManagementControllerImpl.class); @@ -2292,7 +2025,7 @@ public void testSynchronizeLdapUsersAndGroups() throws Exception { controller.synchronizeLdapUsersAndGroups(userRequest, groupRequest); - verify(ldapDataPopulator, clusters, users, ldapBatchDto, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(ldapDataPopulator, clusters, users, ldapBatchDto); } private void setAmbariMetaInfo(AmbariMetaInfo metaInfo, AmbariManagementController controller) throws NoSuchFieldException, IllegalAccessException { @@ -2314,10 +2047,6 @@ public void configure(Binder binder) { binder.bind(Users.class).toInstance(users); binder.bind(AmbariSessionManager.class).toInstance(sessionManager); binder.bind(RegistryManager.class).toInstance(registryManager); - binder.bind(HostComponentStateEntity.class).toInstance(hostComponentStateEntity); - binder.bind(HostComponentStateDAO.class).toInstance(hostComponentStateDAO); - binder.bind(ServiceComponentDesiredStateEntity.class).toInstance(serviceComponentDesiredStateEntity); - binder.bind(ServiceComponentDesiredStateDAO.class).toInstance(serviceComponentDesiredStateDAO); } } @@ -2325,7 +2054,7 @@ private class NestedTestClass extends AmbariManagementControllerImpl { public NestedTestClass(ActionManager actionManager, Clusters clusters, Injector injector, OsFamily osFamilyMock) throws Exception { super(actionManager, clusters, injector); - osFamily = osFamilyMock; + this.osFamily = osFamilyMock; } // public ServiceOsSpecific testPopulateServicePackagesInfo(ServiceInfo serviceInfo, Map hostParams, @@ -2348,15 +2077,12 @@ public void testVerifyRepositories() throws Exception { expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - Configuration configuration = createNiceMock(Configuration.class); String[] suffices = {"/repodata/repomd.xml"}; 
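[Editor's note] The testCreateDefaultHostParams hunk above switches from a plain nice mock to an EasyMock partial mock: only getRcaParameters() is stubbed, while the rest of the controller runs real code built through the real constructor. A self-contained sketch of that builder pattern, with Controller standing in for the actual class:

```java
import static org.easymock.EasyMock.createMockBuilder;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;

import java.util.HashMap;
import java.util.Map;

// Placeholder for AmbariManagementControllerImpl, not the real class.
class Controller {
  private final String name;
  Controller(String name) { this.name = name; }
  public Map<String, String> getRcaParameters() {
    throw new IllegalStateException("would touch real config in a unit test");
  }
  public String describe() { return name + ": " + getRcaParameters().size() + " RCA params"; }
}

public class PartialMockSketch {
  public static void main(String[] args) {
    Controller partial = createMockBuilder(Controller.class)
        .addMockedMethod("getRcaParameters")  // stub only this method
        .withConstructor("test")              // the real constructor still runs
        .createNiceMock();
    expect(partial.getRcaParameters()).andReturn(new HashMap<>());
    replay(partial);

    // describe() executes the real implementation but hits the stub:
    System.out.println(partial.describe()); // -> "test: 0 RCA params"
  }
}
```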
expect(configuration.getRepoValidationSuffixes("redhat6")).andReturn(suffices); // replay mocks - replay(injector, clusters, ambariMetaInfo, configuration, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(injector, clusters, ambariMetaInfo, configuration); // test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -2381,7 +2107,7 @@ public void testVerifyRepositories() throws Exception { Assert.assertEquals("Could not access base url . file:///some/repo/repodata/repomd.xml . ", e.getMessage()); } - verify(injector, clusters, ambariMetaInfo, configuration, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, ambariMetaInfo, configuration); } @Test @@ -2402,9 +2128,6 @@ public void testRegisterRackChange() throws Exception { expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - RepositoryInfo dummyRepoInfo = new RepositoryInfo(); dummyRepoInfo.setRepoName("repo_name"); @@ -2536,8 +2259,6 @@ public void testGetPacklets() throws Exception { samplePacklet.setDefinition("nifi.tar.gz"); packletArrayList.add(samplePacklet); expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).atLeastOnce(); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); expect(ambariMetaInfo.getModules(mpackId)).andReturn(packletArrayList).atLeastOnce(); replay(ambariMetaInfo,injector); AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java index cd3391eb579..1b4033b1938 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java @@ -21,7 +21,6 @@ import static java.util.stream.Collectors.toSet; import static org.easymock.EasyMock.capture; -import static org.easymock.EasyMock.createMock; import static org.easymock.EasyMock.createNiceMock; import static org.easymock.EasyMock.createStrictMock; import static org.easymock.EasyMock.expect; @@ -103,11 +102,9 @@ import org.apache.ambari.server.orm.OrmTestHelper; import org.apache.ambari.server.orm.dao.ClusterServiceDAO; import org.apache.ambari.server.orm.dao.ExecutionCommandDAO; -import org.apache.ambari.server.orm.dao.HostComponentStateDAO; import org.apache.ambari.server.orm.dao.HostDAO; import org.apache.ambari.server.orm.dao.HostRoleCommandDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; -import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO; import org.apache.ambari.server.orm.dao.ServiceGroupDAO; import org.apache.ambari.server.orm.dao.StackDAO; import org.apache.ambari.server.orm.dao.TopologyHostInfoDAO; @@ -201,7 +198,6 @@ public class AmbariManagementControllerTest { private 
static final String COMPONENT_NAME_HIVE_SERVER = "HIVE_SERVER"; private static final String STACK_VERSION = "0.2"; private static final String NEW_STACK_VERSION = "2.0.6"; - private static final String HDP_0_1 = "HDP-0.1"; private static final String OS_TYPE = "centos5"; private static final String REPO_ID = "HDP-1.1.1.16"; private static final String REPO_NAME = "HDP"; @@ -301,7 +297,7 @@ public void setup() throws Exception { EasyMock.replay(injector.getInstance(AuditLogger.class)); repositoryVersion01 = helper.getOrCreateRepositoryVersion( - new StackId(HDP_0_1), "0.1-1234"); + new StackId("HDP-0.1"), "0.1-1234"); repositoryVersion02 = helper.getOrCreateRepositoryVersion( new StackId("HDP-0.2"), "0.2-1234"); @@ -380,7 +376,7 @@ private void deleteHost(String hostname) throws Exception { * @throws Exception */ private void createCluster(String clusterName) throws Exception{ - ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), SecurityType.NONE, HDP_0_1, null); + ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), SecurityType.NONE, "HDP-0.1", null); controller.createCluster(r); } @@ -413,7 +409,7 @@ private void createServiceComponent(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, dStateStr); + serviceName, componentName, dStateStr); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } @@ -426,7 +422,7 @@ private void createServiceComponentHost(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, hostname, dStateStr); + serviceName, componentName, hostname, dStateStr); controller.createHostComponents(Collections.singleton(r)); } @@ -439,7 +435,7 @@ private void deleteServiceComponentHost(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, hostname, dStateStr); + serviceName, componentName, hostname, dStateStr); controller.deleteHostComponents(Collections.singleton(r)); } @@ -509,7 +505,7 @@ private long stopServiceComponentHosts(String clusterName, String serviceGroupNa for (ServiceComponent sc : s.getServiceComponents().values()) { for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) { ServiceComponentHostRequest schr = new ServiceComponentHostRequest - (clusterName, serviceGroupName, serviceName, sc.getName(), sc.getType(), + (clusterName, serviceGroupName, serviceName, sc.getName(), sch.getHostName(), State.INSTALLED.name()); requests.add(schr); } @@ -662,7 +658,7 @@ public void testCreateClusterWithHostMapping() throws Exception { Set hostNames = new HashSet<>(); hostNames.add(host1); hostNames.add(host2); - ClusterRequest r = new ClusterRequest(null, cluster1, HDP_0_1, hostNames); + ClusterRequest r = new ClusterRequest(null, cluster1, "HDP-0.1", hostNames); try { controller.createCluster(r); @@ -717,7 +713,7 @@ public void testCreateServicesSimple() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, 
serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, State.INIT); @@ -782,11 +778,11 @@ public void testCreateServicesWithInvalidRequest() throws Exception { String cluster2 = getUniqueName(); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); - clusters.addCluster(cluster2, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); + clusters.addCluster(cluster2, new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster2, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster2, serviceGroupName); try { set1.clear(); @@ -852,7 +848,7 @@ public void testCreateServiceWithInvalidInfo() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; try { createService(cluster1, serviceGroupName, serviceName, State.INSTALLING); @@ -896,10 +892,10 @@ public void testCreateServicesMultiple() throws Exception { String cluster1 = getUniqueName(); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); ServiceRequest valid1 = new ServiceRequest(cluster1, serviceGroupName, "HDFS", repositoryVersion01.getId(), null, null); ServiceRequest valid2 = new ServiceRequest(cluster1, serviceGroupName, "MAPREDUCE", repositoryVersion01.getId(), null, null); set1.add(valid1); @@ -929,7 +925,7 @@ public void testCreateServiceComponentSimple() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); @@ -955,7 +951,7 @@ public void testCreateServiceComponentSimple() throws Exception { .getService(serviceName).getServiceComponent(componentName)); ServiceComponentRequest r = - new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, null, null, null); + new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, null, null); Set response = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(1, response.size()); @@ -994,7 +990,7 @@ public void testCreateServiceComponentWithInvalidRequest() try { set1.clear(); ServiceComponentRequest rInvalid = - new ServiceComponentRequest(cluster1, null, null, null, null, null); + new ServiceComponentRequest(cluster1, null, null, null, null); set1.add(rInvalid); ComponentResourceProviderTest.createComponents(controller, set1); fail("Expected failure for invalid requests"); @@ -1005,7 +1001,7 @@ public void 
testCreateServiceComponentWithInvalidRequest() try { set1.clear(); ServiceComponentRequest rInvalid = - new ServiceComponentRequest(cluster1, "s1", null, null, null, null); + new ServiceComponentRequest(cluster1, "s1", null, null, null); set1.add(rInvalid); ComponentResourceProviderTest.createComponents(controller, set1); fail("Expected failure for invalid requests"); @@ -1024,8 +1020,8 @@ public void testCreateServiceComponentWithInvalidRequest() // Expected } - clusters.addCluster(cluster1, new StackId(HDP_0_1)); - clusters.addCluster(cluster2, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); + clusters.addCluster(cluster2, new StackId("HDP-0.1")); String serviceGroupName = "CORE"; @@ -1041,7 +1037,7 @@ public void testCreateServiceComponentWithInvalidRequest() } Cluster c1 = clusters.getCluster(cluster1); - StackId stackId = new StackId(HDP_0_1); + StackId stackId = new StackId("HDP-0.1"); c1.setDesiredStackVersion(stackId); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, @@ -1055,11 +1051,11 @@ public void testCreateServiceComponentWithInvalidRequest() set1.clear(); ServiceComponentRequest valid1 = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", null); ServiceComponentRequest valid2 = - new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "JOBTRACKER", "JOBTRACKER", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "JOBTRACKER", null); ServiceComponentRequest valid3 = - new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "TASKTRACKER", "TASKTRACKER", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "TASKTRACKER", null); set1.add(valid1); set1.add(valid2); set1.add(valid3); @@ -1068,9 +1064,9 @@ public void testCreateServiceComponentWithInvalidRequest() try { set1.clear(); ServiceComponentRequest rInvalid1 = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", null); ServiceComponentRequest rInvalid2 = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", null); set1.add(rInvalid1); set1.add(rInvalid2); ComponentResourceProviderTest.createComponents(controller, set1); @@ -1082,9 +1078,9 @@ public void testCreateServiceComponentWithInvalidRequest() try { set1.clear(); ServiceComponentRequest rInvalid1 = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "HDFS_CLIENT", null); ServiceComponentRequest rInvalid2 = - new ServiceComponentRequest(cluster2, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null); + new ServiceComponentRequest(cluster2, serviceGroupName, "HDFS", "HDFS_CLIENT", null); set1.add(rInvalid1); set1.add(rInvalid2); ComponentResourceProviderTest.createComponents(controller, set1); @@ -1096,7 +1092,7 @@ public void testCreateServiceComponentWithInvalidRequest() try { set1.clear(); ServiceComponentRequest rInvalid = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", null); 
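[Editor's note] For reference, the five-argument form these hunks converge on is (clusterName, serviceGroupName, serviceName, componentName, desiredState); the duplicated component-type argument is gone, and null fields act as wildcards when the request is used as a read filter, as the surrounding tests do:

```java
// Create/identify a single component:
ServiceComponentRequest create =
    new ServiceComponentRequest("cluster1", "CORE", "HDFS", "NAMENODE", null);

// Read filter: every component in the cluster with a given desired state
// (null service/component fields are treated as wildcards):
ServiceComponentRequest query =
    new ServiceComponentRequest("cluster1", null, null, null, State.UNINSTALLED.toString());
```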
set1.add(rInvalid); ComponentResourceProviderTest.createComponents(controller, set1); fail("Expected failure for already existing component"); @@ -1123,9 +1119,9 @@ public void testGetExecutionCommandWithClusterEnvForRetry() throws Exception { createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -1167,7 +1163,7 @@ public void testGetExecutionCommandWithClusterEnvForRetry() throws Exception { // issue an install command, expect retry is enabled ServiceComponentHostRequest schr = - new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, "INSTALLED"); + new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, "INSTALLED"); Map requestProps = new HashMap<>(); requestProps.put("phase", "INITIAL_INSTALL"); RequestStatusResponse rsr = updateHostComponents(Collections.singleton(schr), requestProps, false); @@ -1193,7 +1189,7 @@ public void testGetExecutionCommandWithClusterEnvForRetry() throws Exception { } // issue an start command but no retry as phase is only INITIAL_INSTALL - schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, "STARTED"); + schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, "STARTED"); rsr = updateHostComponents(Collections.singleton(schr), requestProps, false); stages = actionDB.getAllStages(rsr.getRequestId()); Assert.assertEquals(1, stages.size()); @@ -1223,7 +1219,7 @@ public void testGetExecutionCommandWithClusterEnvForRetry() throws Exception { // issue an start command and retry is expected requestProps.put("phase", "INITIAL_START"); - schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, "STARTED"); + schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, "STARTED"); rsr = updateHostComponents(Collections.singleton(schr), requestProps, false); stages = actionDB.getAllStages(rsr.getRequestId()); Assert.assertEquals(1, stages.size()); @@ -1253,7 +1249,7 @@ public void testGetExecutionCommandWithClusterEnvForRetry() throws Exception { controller.updateClusters(Collections.singleton(crReq), null); requestProps.put("phase", "INITIAL_START"); - schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, "STARTED"); + schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, "STARTED"); rsr = updateHostComponents(Collections.singleton(schr), requestProps, false); stages = actionDB.getAllStages(rsr.getRequestId()); Assert.assertEquals(1, stages.size()); @@ -1376,11 +1372,11 @@ public void testCreateServiceComponentMultiple() throws Exception { Set set1 = new HashSet<>(); ServiceComponentRequest valid1 = - new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", null); ServiceComponentRequest valid2 = - new 
ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "JOBTRACKER", "JOBTRACKER", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "JOBTRACKER", null); ServiceComponentRequest valid3 = - new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "TASKTRACKER", "TASKTRACKER", null); + new ServiceComponentRequest(cluster1, serviceGroupName, "MAPREDUCE", "TASKTRACKER", null); set1.add(valid1); set1.add(valid2); set1.add(valid3); @@ -1407,8 +1403,8 @@ public void testCreateServiceComponentHostSimple1() throws Exception { private void createServiceComponentHostSimple(String clusterName, String host1, String host2, String serviceGroupName, String serviceName) throws Exception { createCluster(clusterName); - clusters.getCluster(clusterName).setDesiredStackVersion(new StackId(HDP_0_1)); - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, HDP_0_1); + clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1")); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); createService(clusterName, serviceGroupName, serviceName, repositoryVersion01, null); String componentName1 = "NAMENODE"; String componentName2 = "DATANODE"; @@ -1446,7 +1442,8 @@ private void createServiceComponentHostSimple(String clusterName, String host1, // Expected } - createServiceComponentHost(clusterName, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + createServiceComponentHost(clusterName, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(clusterName, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(clusterName, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(clusterName, serviceGroupName, serviceName, componentName3, host1, null); @@ -1483,7 +1480,7 @@ private void createServiceComponentHostSimple(String clusterName, String host1, ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, - componentName2, componentName2, null, null); + componentName2, null, null); Set response = controller.getHostComponents(Collections.singleton(r)); @@ -1497,7 +1494,7 @@ public void testCreateServiceComponentHostMultiple() String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -1513,16 +1510,16 @@ public void testCreateServiceComponentHostMultiple() new HashSet<>(); ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, State.INIT.toString()); + componentName1, host1, State.INIT.toString()); ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName2, componentName2, host1, State.INIT.toString()); + componentName2, host1, State.INIT.toString()); ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host2, State.INIT.toString()); + componentName1, host2, State.INIT.toString()); 
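[Editor's note] The host-level request undergoes the same contraction, to six arguments: (clusterName, serviceGroupName, serviceName, componentName, hostName, desiredState). Condensed from the pattern the surrounding hunks repeat inside a test method, assuming a controller in scope:

```java
import java.util.HashSet;
import java.util.Set;

// Inside a test method; `controller` is an AmbariManagementController.
Set<ServiceComponentHostRequest> requests = new HashSet<>();
requests.add(new ServiceComponentHostRequest(
    "cluster1", "CORE", "HDFS", "NAMENODE", "host1", State.INIT.toString()));
requests.add(new ServiceComponentHostRequest(
    "cluster1", "CORE", "HDFS", "DATANODE", "host2", State.INIT.toString()));
controller.createHostComponents(requests);
```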
ServiceComponentHostRequest r4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName2, componentName2, host2, State.INIT.toString()); + componentName2, host2, State.INIT.toString()); set1.add(r1); set1.add(r2); @@ -1552,7 +1549,7 @@ public void testCreateServiceComponentHostMultiple() @Test(expected = IllegalArgumentException.class) public void createHostComponentsRequestRejectedWithoutClusterName() throws Exception { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest(null, "CORE", "HDFS", "NAMENODE", "NAMENODE", "host1", null) + new ServiceComponentHostRequest(null, "CORE", "HDFS", "NAMENODE", "host1", null) ); controller.createHostComponents(requests); } @@ -1560,7 +1557,7 @@ public void createHostComponentsRequestRejectedWithoutClusterName() throws Excep @Test(expected = IllegalArgumentException.class) public void createHostComponentsRequestRejectedWithoutComponentName() throws Exception { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest("foo", "CORE", "HDFS", null, null, "host1", null) + new ServiceComponentHostRequest("foo", "CORE", "HDFS", null, "host1", null) ); controller.createHostComponents(requests); } @@ -1568,7 +1565,7 @@ public void createHostComponentsRequestRejectedWithoutComponentName() throws Exc @Test(expected = IllegalArgumentException.class) public void createHostComponentsRequestRejectedWithoutHostname() throws Exception { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest("foo", "CORE", "HDFS", "NAMENODE", "NAMENODE",null, null) + new ServiceComponentHostRequest("foo", "CORE", "HDFS", "NAMENODE", null, null) ); controller.createHostComponents(requests); } @@ -1576,7 +1573,7 @@ public void createHostComponentsRequestRejectedWithoutHostname() throws Exceptio @Test(expected = ParentObjectNotFoundException.class) public void createHostComponentsRequestRejectedForNonexistentCluster() throws Exception { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest("foo", "CORE", "HDFS", "NAMENODE", "NAMENODE", "host1", null) + new ServiceComponentHostRequest("foo", "CORE", "HDFS", "NAMENODE", "host1", null) ); controller.createHostComponents(requests); } @@ -1621,7 +1618,7 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception try { ServiceComponentHostRequest rInvalid = - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(rInvalid)); fail("Expected failure for invalid service"); } catch (IllegalArgumentException e) { @@ -1638,24 +1635,24 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception try { ServiceComponentHostRequest rInvalid = - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(rInvalid)); fail("Expected failure for invalid service"); } catch (Exception e) { // Expected } - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "NAMENODE", "NAMENODE"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "NAMENODE"); s1.addServiceComponent(sc1); - ServiceComponent sc2 = serviceComponentFactory.createNew(s2, "NAMENODE", "NAMENODE"); + ServiceComponent sc2 = serviceComponentFactory.createNew(s2, 
"NAMENODE"); s2.addServiceComponent(sc2); - ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "NAMENODE", "NAMENODE"); + ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "NAMENODE"); s3.addServiceComponent(sc3); try { ServiceComponentHostRequest rInvalid = - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(rInvalid)); fail("Expected failure for invalid host"); } catch (Exception e) { @@ -1680,7 +1677,7 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception try { ServiceComponentHostRequest rInvalid = - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(rInvalid)); fail("Expected failure for invalid host cluster mapping"); } catch (Exception e) { @@ -1693,13 +1690,13 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception clusters.mapAndPublishHostsToCluster(hostnames, cluster2); ServiceComponentHostRequest valid = - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(valid)); try { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest(clusterFoo, "SG1", "HDFS", "NAMENODE", "NAMENODE", host2, null), - new ServiceComponentHostRequest(clusterFoo, "SG2", "HDFS", "NAMENODE", "NAMENODE", host2, null) + new ServiceComponentHostRequest(clusterFoo, "SG1", "HDFS", "NAMENODE", host2, null), + new ServiceComponentHostRequest(clusterFoo, "SG2", "HDFS", "NAMENODE", host2, null) ); controller.createHostComponents(requests); fail("Expected failure for wrong service requests as the SG1 service group doesn't exist on cluster"); @@ -1709,8 +1706,8 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception try { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host2, null), - new ServiceComponentHostRequest(cluster2, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host3, null) + new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", host2, null), + new ServiceComponentHostRequest(cluster2, serviceGroupName, "HDFS", "NAMENODE", host3, null) ); controller.createHostComponents(requests); fail("Expected failure for multiple clusters"); @@ -1720,8 +1717,8 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception try { Set requests = ImmutableSet.of( - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null), - new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host2, null) + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host1, null), + new ServiceComponentHostRequest(clusterFoo, serviceGroupName, "HDFS", "NAMENODE", host2, null) ); controller.createHostComponents(requests); fail("Expected failure for already existing"); @@ -1734,11 +1731,11 @@ public void testCreateServiceComponentHostWithInvalidRequest() throws Exception 
Assert.assertEquals(0, foo.getServiceComponentHosts(host3).size()); ServiceComponentHostRequest valid1 = - new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(valid1)); ServiceComponentHostRequest valid2 = - new ServiceComponentHostRequest(cluster2, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null); + new ServiceComponentHostRequest(cluster2, serviceGroupName, "HDFS", "NAMENODE", host1, null); controller.createHostComponents(Collections.singleton(valid2)); Assert.assertEquals(1, foo.getServiceComponentHosts(host1).size()); @@ -1782,9 +1779,9 @@ public void testCreateHostSimple() throws Exception { request.setClusterName(cluster1); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); Cluster c = clusters.getCluster(cluster1); - StackId stackId = new StackId(HDP_0_1); + StackId stackId = new StackId("HDP-0.1"); c.setDesiredStackVersion(stackId); c.setCurrentStackVersion(stackId); helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); @@ -1809,9 +1806,9 @@ public void testCreateHostMultiple() throws Exception { clusters.addHost(host1); clusters.addHost(host2); clusters.addHost(host3); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); Cluster c = clusters.getCluster(cluster1); - StackId stackID = new StackId(HDP_0_1); + StackId stackID = new StackId("HDP-0.1"); c.setDesiredStackVersion(stackID); c.setCurrentStackVersion(stackID); helper.getOrCreateRepositoryVersion(stackID, stackID.getStackVersion()); @@ -1869,7 +1866,7 @@ public void testCreateHostWithInvalidRequests() throws Exception { // Expected } - clusters.addCluster(cluster1, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); try { set1.clear(); @@ -2150,11 +2147,11 @@ public void testInstallAndStartService() throws Exception { public void testGetClusters() throws Exception { String cluster1 = getUniqueName(); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); Cluster c1 = clusters.getCluster(cluster1); - StackId stackId = new StackId(HDP_0_1); + StackId stackId = new StackId("HDP-0.1"); c1.setDesiredStackVersion(stackId); helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); @@ -2184,10 +2181,10 @@ public void testGetClustersWithFilters() throws Exception { String cluster3 = getUniqueName(); String cluster4 = getUniqueName(); - clusters.addCluster(cluster1, new StackId(HDP_0_1)); - clusters.addCluster(cluster2, new StackId(HDP_0_1)); + clusters.addCluster(cluster1, new StackId("HDP-0.1")); + clusters.addCluster(cluster2, new StackId("HDP-0.1")); clusters.addCluster(cluster3, new StackId("HDP-1.2.0")); - clusters.addCluster(cluster4, new StackId(HDP_0_1)); + clusters.addCluster(cluster4, new StackId("HDP-0.1")); ClusterRequest r = new ClusterRequest(null, null, null, null); Set resp = controller.getClusters(Collections.singleton(r)); @@ -2199,7 +2196,7 @@ public void testGetClustersWithFilters() throws Exception { Cluster c1 = clusters.getCluster(cluster1); Assert.assertEquals(c1.getClusterId(), resp.iterator().next().getClusterId()); - r = new ClusterRequest(null, null, HDP_0_1, null); + r = new ClusterRequest(null, null, "HDP-0.1", null); resp = 
controller.getClusters(Collections.singleton(r)); Assert.assertTrue(resp.size() >= 3); @@ -2212,7 +2209,7 @@ public void testGetClustersWithFilters() throws Exception { public void testGetServices() throws Exception { String cluster1 = getUniqueName(); - StackId stackId = new StackId(HDP_0_1); + StackId stackId = new StackId("HDP-0.1"); RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); @@ -2232,7 +2229,7 @@ public void testGetServices() throws Exception { Assert.assertTrue(s1.getClusterId().longValue() == resp1.getClusterId().longValue()); Assert.assertEquals(s1.getCluster().getClusterName(), resp1.getClusterName()); Assert.assertEquals(s1.getName(), resp1.getServiceName()); - Assert.assertEquals(HDP_0_1, s1.getDesiredStackId().getStackId()); + Assert.assertEquals("HDP-0.1", s1.getDesiredStackId().getStackId()); Assert.assertEquals(s1.getDesiredStackId().getStackId(), resp1.getDesiredStackId()); Assert.assertEquals(State.INSTALLED.toString(), resp1.getDesiredState()); @@ -2335,12 +2332,12 @@ public void testGetServiceComponents() throws Exception { Service s1 = serviceFactory.createNew(c1, c1.addServiceGroup(serviceGroupName, "HDP-0.2"), new ArrayList<>(), "HDFS", "HDFS", repositoryVersion); c1.addService(s1); s1.setDesiredState(State.INSTALLED); - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE", "DATANODE"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE"); s1.addServiceComponent(sc1); sc1.setDesiredState(State.UNINSTALLED); ServiceComponentRequest r = new ServiceComponentRequest(cluster1, serviceGroupName, - s1.getName(), sc1.getName(), sc1.getType(), null); + s1.getName(), sc1.getName(), null); Set resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(1, resps.size()); @@ -2390,14 +2387,14 @@ public void testGetServiceComponentsWithFilters() throws Exception { s2.setDesiredState(State.INSTALLED); s4.setDesiredState(State.INSTALLED); - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE", "DATANODE"); - ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE", "NAMENODE"); - ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "HBASE_REGIONSERVER", "HBASE_REGIONSERVER"); - ServiceComponent sc4 = serviceComponentFactory.createNew(s4, "HIVE_SERVER", "HIVE_SERVER"); - ServiceComponent sc5 = serviceComponentFactory.createNew(s4, "HIVE_CLIENT", "HIVE_CLIENT"); - ServiceComponent sc6 = serviceComponentFactory.createNew(s4, "MYSQL_SERVER", "MYSQL_SERVER"); - ServiceComponent sc7 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_SERVER", "ZOOKEEPER_SERVER"); - ServiceComponent sc8 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE"); + ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE"); + ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "HBASE_REGIONSERVER"); + ServiceComponent sc4 = serviceComponentFactory.createNew(s4, "HIVE_SERVER"); + ServiceComponent sc5 = serviceComponentFactory.createNew(s4, "HIVE_CLIENT"); + ServiceComponent sc6 = serviceComponentFactory.createNew(s4, "MYSQL_SERVER"); + ServiceComponent sc7 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_SERVER"); + ServiceComponent sc8 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_CLIENT"); s1.addServiceComponent(sc1); s1.addServiceComponent(sc2); @@ -2416,7 +2413,7 @@ public void 
testGetServiceComponentsWithFilters() throws Exception { sc8.setDesiredState(State.UNINSTALLED); ServiceComponentRequest r = new ServiceComponentRequest(null, null, null, - null, null, null); + null, null); try { ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); @@ -2427,25 +2424,25 @@ public void testGetServiceComponentsWithFilters() throws Exception { // all comps per cluster r = new ServiceComponentRequest(c1.getClusterName(), null, - null, null, null, null); + null, null, null); Set resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(3, resps.size()); // all comps per cluster filter on state r = new ServiceComponentRequest(c2.getClusterName(), null, - null, null, null, State.UNINSTALLED.toString()); + null, null, State.UNINSTALLED.toString()); resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(4, resps.size()); // all comps for given service r = new ServiceComponentRequest(c2.getClusterName(), null, - s5.getName(), null, null, null); + s5.getName(), null, null); resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(2, resps.size()); // all comps for given service filter by state r = new ServiceComponentRequest(c2.getClusterName(), null, - s4.getName(), null, null, State.INIT.toString()); + s4.getName(), null, State.INIT.toString()); resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(1, resps.size()); Assert.assertEquals(sc4.getName(), @@ -2453,7 +2450,7 @@ public void testGetServiceComponentsWithFilters() throws Exception { // get single given comp r = new ServiceComponentRequest(c2.getClusterName(), null, - null, sc5.getName(), sc5.getType(), State.INIT.toString()); + null, sc5.getName(), State.INIT.toString()); resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(1, resps.size()); Assert.assertEquals(sc5.getName(), @@ -2461,7 +2458,7 @@ public void testGetServiceComponentsWithFilters() throws Exception { // get single given comp and given svc r = new ServiceComponentRequest(c2.getClusterName(), null, - s4.getName(), sc5.getName(), sc5.getType(), State.INIT.toString()); + s4.getName(), sc5.getName(), State.INIT.toString()); resps = ComponentResourceProviderTest.getComponents(controller, Collections.singleton(r)); Assert.assertEquals(1, resps.size()); Assert.assertEquals(sc5.getName(), @@ -2471,11 +2468,11 @@ public void testGetServiceComponentsWithFilters() throws Exception { ServiceComponentRequest r1, r2, r3; Set reqs = new HashSet<>(); r1 = new ServiceComponentRequest(c2.getClusterName(), null, - null, null, null, State.UNINSTALLED.toString()); + null, null, State.UNINSTALLED.toString()); r2 = new ServiceComponentRequest(c1.getClusterName(), null, - null, null, null, null); + null, null, null); r3 = new ServiceComponentRequest(c1.getClusterName(), null, - null, null, null, State.INIT.toString()); + null, null, State.INIT.toString()); reqs.addAll(Arrays.asList(r1, r2, r3)); resps = ComponentResourceProviderTest.getComponents(controller, reqs); Assert.assertEquals(7, resps.size()); @@ -2486,13 +2483,13 @@ public void testGetServiceComponentHosts() throws Exception { String cluster1 = getUniqueName(); String host1 = getUniqueName(); - Cluster c1 = setupClusterWithHosts(cluster1, HDP_0_1, Lists.newArrayList(host1), "centos5"); + Cluster c1 = 
setupClusterWithHosts(cluster1, "HDP-0.1", Lists.newArrayList(host1), "centos5"); RepositoryVersionEntity repositoryVersion = repositoryVersion01; - ServiceGroup serviceGroup = c1.addServiceGroup("CORE", HDP_0_1); + ServiceGroup serviceGroup = c1.addServiceGroup("CORE", "HDP-0.1"); Service s1 = serviceFactory.createNew(c1, serviceGroup, new ArrayList<>(), "HDFS", "HDFS", repositoryVersion); c1.addService(s1); - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE", "DATANODE"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE"); s1.addServiceComponent(sc1); sc1.setDesiredState(State.UNINSTALLED); ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, host1); @@ -2507,7 +2504,7 @@ public void testGetServiceComponentHosts() throws Exception { ServiceComponentHostRequest r = new ServiceComponentHostRequest(c1.getClusterName(), null, - null, null, null, null, null); + null, null, null, null); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); @@ -2538,7 +2535,7 @@ public void testGetServiceComponentHostsWithStaleConfigFilter() throws Exception Long clusterId = c.getClusterId(); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -2593,30 +2590,30 @@ public void testGetServiceComponentHostsWithStaleConfigFilter() throws Exception s1.getServiceComponent(componentName3).getServiceComponentHost(host2).updateActualConfigs(actualConfig); ServiceComponentHostRequest r = - new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + new ServiceComponentHostRequest(cluster1, null, null, null, null, null); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(5, resps.size()); //Get all host components with stale config = true - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setStaleConfig("true"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); //Get all host components with stale config = false - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setStaleConfig("false"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(3, resps.size()); //Get all host components with stale config = false and hostname filter - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, host1, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, host1, null); r.setStaleConfig("false"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); //Get all host components with stale config = false and hostname filter - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, host2, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, host2, null); r.setStaleConfig("true"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); @@ -2631,7 +2628,7 @@ public void 
testServiceComponentHostsWithDecommissioned() throws Exception { setupClusterWithHosts(cluster1, "HDP-2.0.7", Arrays.asList(host1, host2), "centos5"); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.7"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -2661,30 +2658,30 @@ public void testServiceComponentHostsWithDecommissioned() throws Exception { setComponentAdminState(HostComponentAdminState.INSERVICE); ServiceComponentHostRequest r = - new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + new ServiceComponentHostRequest(cluster1, null, null, null, null, null); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(5, resps.size()); //Get all host components with decommissiond = true - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setAdminState("DECOMMISSIONED"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); //Get all host components with decommissioned = false - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setAdminState("INSERVICE"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); //Get all host components with decommissioned = some random string - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setAdminState("INSTALLED"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(0, resps.size()); //Update adminState - r = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, null); + r = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, null); r.setAdminState("DECOMMISSIONED"); try { updateHostComponents(Collections.singleton(r), new HashMap<>(), false); @@ -2700,7 +2697,7 @@ public void testHbaseDecommission() throws Exception { createCluster(cluster1); clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("HDP-2.0.7")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.7"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HBASE"; createService(cluster1, serviceGroupName, serviceName, repositoryVersion207, null); String componentName1 = "HBASE_MASTER"; @@ -2873,9 +2870,9 @@ public void testGetServiceComponentHostsWithFilters() throws Exception { s1.setDesiredState(State.INSTALLED); s2.setDesiredState(State.INSTALLED); - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE", "DATANODE"); - ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE", "NAMENODE"); - ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "HBASE_REGIONSERVER", "HBASE_REGIONSERVER"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE"); + ServiceComponent sc2 = serviceComponentFactory.createNew(s1, 
"NAMENODE"); + ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "HBASE_REGIONSERVER"); s1.addServiceComponent(sc1); s1.addServiceComponent(sc2); @@ -2904,7 +2901,7 @@ public void testGetServiceComponentHostsWithFilters() throws Exception { sch5.setDesiredState(State.UNINSTALLED); ServiceComponentHostRequest r = - new ServiceComponentHostRequest(null, null, null, null, null, null, null); + new ServiceComponentHostRequest(null, null, null, null, null, null); try { controller.getHostComponents(Collections.singleton(r)); @@ -2914,14 +2911,14 @@ public void testGetServiceComponentHostsWithFilters() throws Exception { } // all across cluster - r = new ServiceComponentHostRequest(c1.getClusterName(), null, null, null, + r = new ServiceComponentHostRequest(c1.getClusterName(), null, null, null, null, null); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(6, resps.size()); // all for service r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s1.getName(), - null, null, null, null); + null, null, null); resps = controller.getHostComponents(Collections.singleton(r)); Set actual = resps.stream() .map(AmbariManagementControllerTest::serviceComponentHostToString) @@ -2934,65 +2931,65 @@ public void testGetServiceComponentHostsWithFilters() throws Exception { // all for component r = new ServiceComponentHostRequest(c1.getClusterName(), null, null, - sc3.getName(), sc3.getType(), null, null); + sc3.getName(), null, null); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); // all for host r = new ServiceComponentHostRequest(c1.getClusterName(), null, null, - null, null, host2, null); + null, host2, null); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); // all across cluster with state filter r = new ServiceComponentHostRequest(c1.getClusterName(), null, null, - null, null, null, State.UNINSTALLED.toString()); + null, null, State.UNINSTALLED.toString()); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); // all for service with state filter r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s1.getName(), - null, null, null, State.INIT.toString()); + null, null, State.INIT.toString()); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); // all for component with state filter r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, null, - sc3.getName(), sc3.getType(), null, State.INSTALLED.toString()); + sc3.getName(), null, State.INSTALLED.toString()); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(0, resps.size()); // all for host with state filter r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, null, - null, null, host2, State.INIT.toString()); + null, host2, State.INIT.toString()); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); // for service and host r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s3.getName(), - null, null, host1, null); + null, host1, null); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(0, resps.size()); // single sch - given service and host and component r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s3.getName(), - sc3.getName(), sc3.getType(), host3, 
State.INSTALLED.toString()); + sc3.getName(), host3, State.INSTALLED.toString()); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(0, resps.size()); // single sch - given service and host and component r = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s3.getName(), - sc3.getName(), sc3.getType(), host3, null); + sc3.getName(), host3, null); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); ServiceComponentHostRequest r1, r2, r3; r1 = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, null, - null, null, host3, null); + null, host3, null); r2 = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, s3.getName(), - sc3.getName(), sc3.getType(), host2, null); + sc3.getName(), host2, null); r3 = new ServiceComponentHostRequest(c1.getClusterName(), serviceGroupName, null, - null, null, host2, null); + null, host2, null); Set reqs = new HashSet<>(); reqs.addAll(Arrays.asList(r1, r2, r3)); @@ -3077,7 +3074,7 @@ public void testServiceUpdateBasic() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; Map mapRequestProps = new HashMap<>(); @@ -3129,8 +3126,8 @@ public void testServiceUpdateInvalidRequest() throws Exception { createCluster(cluster2); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster2, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster2, serviceGroupName); String serviceName1 = "HDFS"; createService(cluster1, serviceGroupName, serviceName1, null); @@ -3175,7 +3172,7 @@ public void testServiceUpdateInvalidRequest() throws Exception { req1 = new ServiceRequest(cluster1, serviceGroupName, serviceName1, repositoryVersion02.getId(), State.INSTALLED.toString(), null); req2 = new ServiceRequest(cluster1, serviceGroupName, serviceName1, repositoryVersion02.getId(), - State.STARTED.toString(), null); + State.INSTALLED.toString(), null); reqs.add(req1); reqs.add(req2); ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false); @@ -3211,7 +3208,7 @@ public void testServiceUpdateRecursive() throws Exception { clusters.getCluster(cluster1) .setDesiredStackVersion(new StackId("HDP-0.2")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-0.2"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; createService(cluster1, serviceGroupName, serviceName1, null); @@ -3239,22 +3236,22 @@ public void testServiceUpdateRecursive() throws Exception { new HashSet<>(); ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, State.INIT.toString()); + componentName1, host1, State.INIT.toString()); ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - 
componentName2, componentName2, host1, State.INIT.toString()); + componentName2, host1, State.INIT.toString()); ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host2, State.INIT.toString()); + componentName1, host2, State.INIT.toString()); ServiceComponentHostRequest r4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host2, State.INIT.toString()); + componentName2, host2, State.INIT.toString()); ServiceComponentHostRequest r5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName2, - componentName3, componentName3, host1, State.INIT.toString()); + componentName3, host1, State.INIT.toString()); ServiceComponentHostRequest r6 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName4, componentName4, host2, State.INIT.toString()); + componentName4, host2, State.INIT.toString()); set1.add(r1); set1.add(r2); @@ -3462,7 +3459,7 @@ public void testServiceComponentUpdateRecursive() throws Exception { createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; createService(cluster1, serviceGroupName, serviceName1, null); String componentName1 = "NAMENODE"; @@ -3482,19 +3479,19 @@ public void testServiceComponentUpdateRecursive() throws Exception { new HashSet<>(); ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, State.INIT.toString()); + componentName1, host1, State.INIT.toString()); ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host1, State.INIT.toString()); + componentName2, host1, State.INIT.toString()); ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host2, State.INIT.toString()); + componentName1, host2, State.INIT.toString()); ServiceComponentHostRequest r4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host2, State.INIT.toString()); + componentName2, host2, State.INIT.toString()); ServiceComponentHostRequest r5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName3, componentName3, host1, State.INIT.toString()); + componentName3, host1, State.INIT.toString()); set1.add(r1); set1.add(r2); @@ -3535,13 +3532,13 @@ public void testServiceComponentUpdateRecursive() throws Exception { // confirm an UNKOWN doesn't fail req1 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc3.getName(), sc3.getType(), State.INSTALLED.toString()); + sc3.getName(), State.INSTALLED.toString()); reqs.add(req1); ComponentResourceProviderTest.updateComponents(controller, reqs, Collections.emptyMap(), true); try { reqs.clear(); req1 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc1.getName(), sc1.getType(), State.INIT.toString()); + sc1.getName(), State.INIT.toString()); reqs.add(req1); ComponentResourceProviderTest.updateComponents(controller, reqs, Collections.emptyMap(), true); fail("Expected failure for invalid state update"); @@ -3567,7 +3564,7 @@ public 
void testServiceComponentUpdateRecursive() throws Exception { try { reqs.clear(); req1 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc1.getName(), sc1.getType(), State.STARTED.toString()); + sc1.getName(), State.STARTED.toString()); reqs.add(req1); ComponentResourceProviderTest.updateComponents(controller, reqs, Collections.emptyMap(), true); fail("Expected failure for invalid state update"); @@ -3592,11 +3589,11 @@ public void testServiceComponentUpdateRecursive() throws Exception { reqs.clear(); req1 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc1.getName(), sc1.getType(), State.INSTALLED.toString()); + sc1.getName(), State.INSTALLED.toString()); req2 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc2.getName(), sc2.getType(), State.INSTALLED.toString()); + sc2.getName(), State.INSTALLED.toString()); req3 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc3.getName(), sc3.getType(), State.INSTALLED.toString()); + sc3.getName(), State.INSTALLED.toString()); reqs.add(req1); reqs.add(req2); reqs.add(req3); @@ -3639,9 +3636,9 @@ public void testServiceComponentUpdateRecursive() throws Exception { // test no-op reqs.clear(); req1 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc1.getName(), sc1.getType(), State.INSTALLED.toString()); + sc1.getName(), State.INSTALLED.toString()); req2 = new ServiceComponentRequest(cluster1, serviceGroupName, serviceName1, - sc2.getName(), sc2.getType(), State.INSTALLED.toString()); + sc2.getName(), State.INSTALLED.toString()); reqs.add(req1); reqs.add(req2); trackAction = ComponentResourceProviderTest.updateComponents(controller, reqs, Collections.emptyMap(), true); @@ -3653,7 +3650,7 @@ public void testServiceComponentHostUpdateRecursive() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; createService(cluster1, serviceGroupName, serviceName1, null); String componentName1 = "NAMENODE"; @@ -3672,19 +3669,19 @@ public void testServiceComponentHostUpdateRecursive() throws Exception { new HashSet<>(); ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, State.INIT.toString()); + componentName1, host1, State.INIT.toString()); ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host1, State.INIT.toString()); + componentName2, host1, State.INIT.toString()); ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host2, State.INIT.toString()); + componentName1, host2, State.INIT.toString()); ServiceComponentHostRequest r4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host2, State.INIT.toString()); + componentName2, host2, State.INIT.toString()); ServiceComponentHostRequest r5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName3, componentName3, host1, State.INIT.toString()); + componentName3, host1, State.INIT.toString()); set1.add(r1); set1.add(r2); @@ -3741,19 +3738,19 @@ 
public void testServiceComponentHostUpdateRecursive() throws Exception { try { reqs.clear(); req1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, + componentName1, host1, State.INSTALLED.toString()); req2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName2, host2, + componentName1, host2, State.INSTALLED.toString()); req3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host1, + componentName2, host1, State.INSTALLED.toString()); req4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host2, + componentName2, host2, State.INSTALLED.toString()); req5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName3, componentName3, host1, + componentName3, host1, State.STARTED.toString()); reqs.add(req1); reqs.add(req2); @@ -3768,15 +3765,15 @@ public void testServiceComponentHostUpdateRecursive() throws Exception { reqs.clear(); req1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, null, - componentName1, componentName1, host1, State.INSTALLED.toString()); + componentName1, host1, State.INSTALLED.toString()); req2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host2, State.INSTALLED.toString()); + componentName1, host2, State.INSTALLED.toString()); req3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, null, - componentName2, componentName2, host1, State.INSTALLED.toString()); + componentName2, host1, State.INSTALLED.toString()); req4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host2, State.INSTALLED.toString()); + componentName2, host2, State.INSTALLED.toString()); req5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName3, componentName3, host1, State.INSTALLED.toString()); + componentName3, host1, State.INSTALLED.toString()); reqs.add(req1); reqs.add(req2); reqs.add(req3); @@ -3808,10 +3805,10 @@ public void testServiceComponentHostUpdateRecursive() throws Exception { // test no-op reqs.clear(); req1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, + componentName1, host1, State.INSTALLED.toString()); req2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host2, + componentName1, host2, State.INSTALLED.toString()); reqs.add(req1); reqs.add(req2); @@ -3856,11 +3853,11 @@ public void testCreateCustomActions() throws Exception { Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); Service mapred = cluster.addService(serviceGroup, "YARN", "YARN", repositoryVersion); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); - mapred.addServiceComponent(Role.RESOURCEMANAGER.name(), Role.RESOURCEMANAGER.name()); + mapred.addServiceComponent(Role.RESOURCEMANAGER.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); 
hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1); @@ -4013,9 +4010,9 @@ public void testComponentCategorySentWithRestart() throws Exception { ServiceGroup serviceGroup = cluster.addServiceGroup(serviceGroupName, "HDP-2.0.7"); Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1); @@ -4120,11 +4117,11 @@ public void testCreateActionsFailures() throws Exception { Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); Service hive = cluster.addService(serviceGroup, "HIVE", "HIVE", repositoryVersion); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); - hive.addServiceComponent(Role.HIVE_SERVER.name(), Role.HIVE_SERVER.name()); + hive.addServiceComponent(Role.HIVE_SERVER.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1); @@ -4364,11 +4361,11 @@ public void testCreateServiceCheckActions() throws Exception { final String host1 = getUniqueName(); final String host2 = getUniqueName(); - setupClusterWithHosts(cluster1, HDP_0_1, Arrays.asList(host1, host2), "centos5"); + setupClusterWithHosts(cluster1, "HDP-0.1", Arrays.asList(host1, host2), "centos5"); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); - cluster.setCurrentStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); + cluster.setCurrentStackVersion(new StackId("HDP-0.1")); RepositoryVersionEntity repositoryVersion = repositoryVersion01; @@ -4384,12 +4381,12 @@ public void testCreateServiceCheckActions() throws Exception { cluster.addDesiredConfig("_test", Collections.singleton(config1)); cluster.addDesiredConfig("_test", Collections.singleton(config2)); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", HDP_0_1); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-0.1"); Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); Service mapReduce = cluster.addService(serviceGroup, "MAPREDUCE", "MAPREDUCE", repositoryVersion); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name(), Role.MAPREDUCE_CLIENT.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost(host2); @@ -4469,9 +4466,9 @@ public void testUpdateConfigForRunningService() 
throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -4492,7 +4489,8 @@ public void testUpdateConfigForRunningService() throws Exception { addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); @@ -4621,14 +4619,14 @@ public void testUpdateConfigForRunningService() throws Exception { configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); configVersions.clear(); configVersions.put("typeC", "v1"); configVersions.put("typeD", "v1"); scReqs.clear(); - scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, componentName2, componentName2, null)); + scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, componentName2, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); // update configs at service level @@ -4648,7 +4646,7 @@ public void testUpdateConfigForRunningService() throws Exception { configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); // update configs at SC level @@ -4657,7 +4655,7 @@ public void testUpdateConfigForRunningService() throws Exception { configVersions.put("typeD", "v1"); scReqs.clear(); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, null)); + componentName1, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); } @@ -4667,9 +4665,9 @@ public void testConfigUpdates() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -4690,7 +4688,8 @@ public void testConfigUpdates() throws Exception { addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + 
createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -4762,7 +4761,7 @@ public void testConfigUpdates() throws Exception { configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); configVersions.clear(); @@ -4770,7 +4769,7 @@ public void testConfigUpdates() throws Exception { configVersions.put("typeD", "v1"); scReqs.clear(); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName2, componentName2, null)); + componentName2, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); // update configs at service level @@ -4789,7 +4788,7 @@ public void testConfigUpdates() throws Exception { configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); // update configs at SC level @@ -4798,7 +4797,7 @@ public void testConfigUpdates() throws Exception { configVersions.put("typeD", "v1"); scReqs.clear(); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, null)); + componentName1, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); } @@ -4808,9 +4807,9 @@ public void testReConfigureService() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -4831,7 +4830,8 @@ public void testReConfigureService() throws Exception { addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -4891,7 +4891,7 @@ public void testReConfigureService() throws Exception { configVersions.put("hdfs-site", "version1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); 
assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); // Reconfigure SCH level @@ -4899,7 +4899,7 @@ public void testReConfigureService() throws Exception { configVersions.put("core-site", "version122"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, null)); + componentName1, host1, null)); assertNull(updateHostComponents(schReqs, Collections.emptyMap(), true)); // Clear Entity Manager @@ -4910,11 +4910,11 @@ public void testReConfigureService() throws Exception { configVersions.put("core-site", "version1"); configVersions.put("hdfs-site", "version1"); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName2, componentName2, null)); + componentName2, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, null)); + componentName1, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); // Reconfigure SC level @@ -4923,12 +4923,12 @@ public void testReConfigureService() throws Exception { scReqs.clear(); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName2, componentName2, null)); + componentName2, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); scReqs.clear(); scReqs.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, null)); + componentName1, null)); assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.emptyMap(), true)); entityManager.clear(); @@ -4958,9 +4958,9 @@ public void testReConfigureServiceClient() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; String serviceName2 = "MAPREDUCE"; String componentName1 = "NAMENODE"; @@ -5136,9 +5136,9 @@ public void testReconfigureClientWithServiceStarted() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -5222,12 +5222,11 @@ public void testClientServiceSmokeTests() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, 
serviceGroupName); String serviceName = "PIG"; createService(cluster1, serviceGroupName, serviceName, repositoryVersion01, null); String componentName1 = "PIG"; createServiceComponent(cluster1, serviceGroupName, serviceName, componentName1, State.INIT); - createServiceComponent(cluster1, serviceGroupName, serviceName, "SOME_CLIENT_FOR_SERVICE_CHECK", State.INIT); String host1 = getUniqueName(); String host2 = getUniqueName(); @@ -5238,11 +5237,10 @@ public void testClientServiceSmokeTests() throws Exception { Map<String, String> mapRequestProps = new HashMap<>(); mapRequestProps.put("context", "Called from a test"); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, - host2, null); - createServiceComponentHost(cluster1, serviceGroupName, null, "SOME_CLIENT_FOR_SERVICE_CHECK", + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host2, null); ServiceRequest r = new ServiceRequest(cluster1, serviceGroupName, serviceName, repositoryVersion01.getId(), @@ -5267,7 +5265,7 @@ public void testClientServiceSmokeTests() throws Exception { } List<ShortTaskStatus> taskStatuses = trackAction.getTasks(); - Assert.assertEquals(3, taskStatuses.size()); + Assert.assertEquals(2, taskStatuses.size()); List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId()); Assert.assertEquals(1, stages.size()); @@ -5320,18 +5318,16 @@ public void testSkipTaskOnUnhealthyHosts() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; String componentName2 = "DATANODE"; - String componentName3 = "HDFS_CLIENT"; createServiceComponent(cluster1, serviceGroupName, serviceName, componentName1, State.INIT); createServiceComponent(cluster1, serviceGroupName, serviceName, componentName2, State.INIT); - createServiceComponent(cluster1, serviceGroupName, serviceName, componentName3, State.INIT); String host1 = getUniqueName(); String host2 = getUniqueName(); @@ -5347,8 +5343,6 @@ public void testSkipTaskOnUnhealthyHosts() throws Exception { host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host3, null); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, - host2, null); // Install installService(cluster1, serviceGroupName, serviceName, false, false); @@ -5458,9 +5452,9 @@ public void testServiceCheckWhenHostIsUnhealthy() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); - clusters.getCluster(cluster1).setDesiredStackVersion(new StackId(HDP_0_1)); + clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); +
ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -5552,9 +5546,9 @@ public void testReInstallForInstallFailedClient() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -5614,7 +5608,7 @@ public void testReInstallClientComponent() throws Exception { clusters.getCluster(cluster1) .setDesiredStackVersion(new StackId("HDP-2.0.6")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.6"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -5643,7 +5637,7 @@ public void testReInstallClientComponent() throws Exception { // Reinstall SCH ServiceComponentHostRequest schr = new ServiceComponentHostRequest - (cluster1, serviceGroupName, serviceName, componentName3, componentName3, host3, State.INSTALLED.name()); + (cluster1, serviceGroupName, serviceName, componentName3, host3, State.INSTALLED.name()); Set<ServiceComponentHostRequest> setReqs = new HashSet<>(); setReqs.add(schr); @@ -5673,7 +5667,7 @@ public void testReInstallClientComponentFromServiceChange() throws Exception { clusters.getCluster(cluster1) .setDesiredStackVersion(new StackId("HDP-2.0.6")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.6"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName = "HDFS_CLIENT"; @@ -5726,7 +5720,7 @@ public void testDecommissonDatanodeAction() throws Exception { Cluster cluster = clusters.getCluster(cluster1); cluster.setDesiredStackVersion(new StackId("HDP-2.0.7")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.7"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -5841,13 +5835,13 @@ public void testDecommissonDatanodeAction() throws Exception { createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host2, null); ServiceComponentHostRequest r = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host2, State.INSTALLED.toString()); + componentName1, host2, State.INSTALLED.toString()); Set<ServiceComponentHostRequest> requests = new HashSet<>(); requests.add(r); updateHostComponents(requests, Collections.emptyMap(), true); s.getServiceComponent(componentName1).getServiceComponentHost(host2).setState(State.INSTALLED); r = new
ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host2, State.STARTED.toString()); + componentName1, host2, State.STARTED.toString()); requests.clear(); requests.add(r); updateHostComponents(requests, Collections.emptyMap(), true); @@ -5926,11 +5920,11 @@ public void testResourceFiltersWithCustomActions() throws Exception { Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); Service mapred = cluster.addService(serviceGroup, "YARN", "YARN", repositoryVersion); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); - mapred.addServiceComponent(Role.RESOURCEMANAGER.name(), Role.RESOURCEMANAGER.name()); + mapred.addServiceComponent(Role.RESOURCEMANAGER.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1); @@ -6023,15 +6017,12 @@ public void testResourceFiltersWithCustomCommands() throws Exception { ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.0.6"); Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); Service mapred = cluster.addService(serviceGroup, "YARN", "YARN", repositoryVersion); - Service hadoop_clients = cluster.addService(serviceGroup, "HADOOP_CLIENTS", "HADOOP_CLIENTS", repositoryVersion); - hdfs = cluster.addDependencyToService("CORE", "HDFS", hadoop_clients.getServiceId()); + hdfs.addServiceComponent(Role.HDFS_CLIENT.name()); + hdfs.addServiceComponent(Role.NAMENODE.name()); + hdfs.addServiceComponent(Role.DATANODE.name()); - hdfs.addServiceComponent(Role.HDFS_CLIENT.name(), Role.HDFS_CLIENT.name()); - hdfs.addServiceComponent(Role.NAMENODE.name(), Role.NAMENODE.name()); - hdfs.addServiceComponent(Role.DATANODE.name(), Role.DATANODE.name()); - - mapred.addServiceComponent(Role.RESOURCEMANAGER.name(), Role.RESOURCEMANAGER.name()); + mapred.addServiceComponent(Role.RESOURCEMANAGER.name()); hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost(host1); hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(host1); @@ -6124,11 +6115,6 @@ public void testResourceFiltersWithCustomCommands() throws Exception { } // Test service checks - specific host - - hadoop_clients.addServiceComponent("SOME_CLIENT_FOR_SERVICE_CHECK", "SOME_CLIENT_FOR_SERVICE_CHECK"); - hadoop_clients.getServiceComponent("SOME_CLIENT_FOR_SERVICE_CHECK").addServiceComponentHost(host1); - hadoop_clients.getServiceComponent("SOME_CLIENT_FOR_SERVICE_CHECK").addServiceComponentHost(host2); - resourceFilters.clear(); resourceFilter = new RequestResourceFilter("CORE", "HDFS", null, Arrays.asList(host1)); @@ -6156,9 +6142,9 @@ public void testConfigsAttachedToServiceChecks() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, 
serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -6176,7 +6162,8 @@ public void testConfigsAttachedToServiceChecks() throws Exception { addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -6230,10 +6217,10 @@ public void testConfigsAttachedToServiceNotCluster() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); - clusters.getCluster(cluster1).setDesiredStackVersion(new StackId(HDP_0_1)); + clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -6251,7 +6238,8 @@ public void testConfigsAttachedToServiceNotCluster() throws Exception { addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -6304,7 +6292,7 @@ public void testHostLevelParamsSentWithCommands() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "PIG"; createService(cluster1, serviceGroupName, serviceName, repositoryVersion01, null); String componentName1 = "PIG"; @@ -6319,8 +6307,9 @@ public void testHostLevelParamsSentWithCommands() throws Exception { Map<String, String> mapRequestProps = new HashMap<>(); mapRequestProps.put("context", "Called from a test"); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host2, null); + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host2, null); @@ -6361,7 +6350,7 @@ public void testConfigGroupOverridesWithHostActions() throws Exception { Cluster cluster = clusters.getCluster(cluster1); cluster.setDesiredStackVersion(new StackId("HDP-2.0.6")); String serviceGroupName = "CORE"; -
ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.6"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; String serviceName2 = "MAPREDUCE2"; createService(cluster1, serviceGroupName, serviceName1, repositoryVersion206, null); @@ -6522,7 +6511,7 @@ public void testConfigGroupOverridesWithDecommissionDatanode() throws Exception Cluster cluster = clusters.getCluster(cluster1); cluster.setDesiredStackVersion(new StackId("HDP-2.0.7")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.7"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -6611,9 +6600,9 @@ public void testConfigGroupOverridesWithServiceCheckActions() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -6630,7 +6619,8 @@ public void testConfigGroupOverridesWithServiceCheckActions() throws Exception { addHostToCluster(host1, cluster1); addHostToCluster(host2, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host2, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -6747,7 +6737,7 @@ public void testGetRepositories() throws Exception { public void testGetStackServices() throws Exception { StackServiceRequest request = new StackServiceRequest(STACK_NAME, NEW_STACK_VERSION, null); Set<StackServiceResponse> responses = controller.getStackServices(Collections.singleton(request)); - Assert.assertEquals(13, responses.size()); + Assert.assertEquals(12, responses.size()); StackServiceRequest requestWithParams = new StackServiceRequest(STACK_NAME, NEW_STACK_VERSION, SERVICE_NAME); @@ -7193,9 +7183,9 @@ public void testServiceStopWhileStopping() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -7214,7 +7204,8 @@ public void testServiceStopWhileStopping() throws Exception { Map<String, String> mapRequestProps = new
HashMap<>(); mapRequestProps.put("context", "Called from a test"); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); @@ -7299,7 +7290,7 @@ public void testServiceStopWhileStopping() throws Exception { sch.setState(State.STOPPING); } else if (sch.getServiceComponentName().equals("DATANODE")) { ServiceComponentHostRequest r1 = new ServiceComponentHostRequest - (cluster1, serviceGroupName, serviceName, sch.getServiceComponentName(), sch.getServiceComponentType(), + (cluster1, serviceGroupName, serviceName, sch.getServiceComponentName(), sch.getHostName(), State.INSTALLED.name()); Set<ServiceComponentHostRequest> reqs1 = new HashSet<>(); @@ -7339,7 +7330,7 @@ public void testGetTasksByRequestId() throws Exception { final String hostName1 = getUniqueName(); final String context = "Test invocation"; - StackId stackID = new StackId(HDP_0_1); + StackId stackID = new StackId("HDP-0.1"); clusters.addCluster(cluster1, stackID); Cluster c = clusters.getCluster(cluster1); Long clusterId = c.getClusterId(); @@ -7458,9 +7449,9 @@ public void testUpdateHostComponentsBadState() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -7479,7 +7470,8 @@ public void testUpdateHostComponentsBadState() throws Exception { Map<String, String> mapRequestProps = new HashMap<>(); mapRequestProps.put("context", "Called from a test"); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, + // null service should work + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); @@ -7529,7 +7521,7 @@ public void testUpdateHostComponentsBadState() throws Exception { } // issue an installed state request without failure - ServiceComponentHostRequest schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host2, "INSTALLED"); + ServiceComponentHostRequest schr = new ServiceComponentHostRequest(cluster1, serviceGroupName, "HDFS", "DATANODE", host2, "INSTALLED"); Map<String, String> requestProps = new HashMap<>(); requestProps.put("datanode", "dn_value"); requestProps.put("namenode", "nn_value"); @@ -7560,7 +7552,7 @@ public void testServiceUpdateRecursiveBadHostComponent() throws Exception { .setDesiredStackVersion(new StackId("HDP-0.2")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-0.2"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; createService(cluster1, serviceGroupName, serviceName1, null); @@ -7576,11 +7568,11 @@ public void testServiceUpdateRecursiveBadHostComponent() throws Exception { Set<ServiceComponentHostRequest> set1 = new HashSet<>(); ServiceComponentHostRequest r1 = new
ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName1, componentName1, host1, State.INIT.toString()); + componentName1, host1, State.INIT.toString()); ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName2, componentName2, host1, State.INIT.toString()); + componentName2, host1, State.INIT.toString()); ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1, - componentName3, componentName3, host1, State.INIT.toString()); + componentName3, host1, State.INIT.toString()); set1.add(r1); set1.add(r2); @@ -7642,7 +7634,7 @@ public void testDeleteHostComponentInVariousStates() throws Exception { createCluster(cluster1); clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("HDP-1.3.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-1.3.1"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String hdfs = "HDFS"; String mapred = "MAPREDUCE"; createService(cluster1, serviceGroupName, hdfs, null); @@ -7685,7 +7677,7 @@ public void testDeleteHostComponentInVariousStates() throws Exception { Set<ServiceComponentHostRequest> schRequests = new HashSet<>(); // delete HC schRequests.clear(); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName1, componentName1, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName1, host1, null)); try { controller.deleteHostComponents(schRequests); Assert.fail("Expect failure while deleting."); @@ -7709,12 +7701,12 @@ public void testDeleteHostComponentInVariousStates() throws Exception { sc6.getServiceComponentHosts().values().iterator().next().setState(State.INIT); schRequests.clear(); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName1, componentName1, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName2, componentName2, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName3, componentName3, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName4, componentName4, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName5, componentName5, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName6, componentName6, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName1, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName2, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, hdfs, componentName3, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName4, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName5, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, mapred, componentName6, host1, null)); DeleteStatusMetaData deleteStatusMetaData = controller.deleteHostComponents(schRequests); Assert.assertEquals(0, deleteStatusMetaData.getExceptionForKeys().size());
} @@ -7726,10 +7718,10 @@ public void testDeleteHostWithComponent() throws Exception { createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -7822,10 +7814,10 @@ public void testDeleteHost() throws Exception { createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -7872,15 +7864,15 @@ public void testDeleteHost() throws Exception { // Case 1: Delete host that is still part of cluster, but do not specify the cluster_name in the request Set<ServiceComponentHostRequest> schRequests = new HashSet<>(); // Disable HC for non-clients - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName1, componentName1, host1, "DISABLED")); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName2, componentName2, host1, "DISABLED")); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName1, host1, "DISABLED")); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName2, host1, "DISABLED")); updateHostComponents(schRequests, new HashMap<>(), false); // Delete HC schRequests.clear(); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName1, componentName1, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName2, componentName2, host1, null)); - schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName3, componentName3, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName1, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName2, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, componentName3, host1, null)); controller.deleteHostComponents(schRequests); Assert.assertEquals(0, cluster.getServiceComponentHosts(host1).size()); @@ -8002,10 +7994,10 @@ public void testDeleteComponentsOnHost() throws Exception { createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String
serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -8020,7 +8012,7 @@ public void testDeleteComponentsOnHost() throws Exception { addHostToCluster(host1, cluster1); - createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName1, host1, null); + createServiceComponentHost(cluster1, serviceGroupName, null, componentName1, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName2, host1, null); createServiceComponentHost(cluster1, serviceGroupName, serviceName, componentName3, host1, null); @@ -8049,7 +8041,7 @@ public void testDeleteComponentsOnHost() throws Exception { sch.handleEvent(new ServiceComponentHostStartedEvent (sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis())); Set<ServiceComponentHostRequest> schRequests = new HashSet<>(); - schRequests.add(new ServiceComponentHostRequest(cluster1, null, null, null, null, host1, null)); + schRequests.add(new ServiceComponentHostRequest(cluster1, null, null, null, host1, null)); DeleteStatusMetaData deleteStatusMetaData = controller.deleteHostComponents(schRequests); Assert.assertEquals(1, deleteStatusMetaData.getExceptionForKeys().size()); @@ -8127,7 +8119,7 @@ public void testApplyConfigurationWithTheSameTag() throws AuthorizationException amc.createCluster(clusterRequest); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-1.2.0"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); Set<ServiceRequest> serviceRequests = new HashSet<>(); serviceRequests.add(new ServiceRequest(cluster1, serviceGroupName, "HDFS", repositoryVersion120.getId(), null, null)); @@ -8191,7 +8183,7 @@ public void testDeleteClusterCreateHost() throws Exception { HostResourceProviderTest.createHosts(amc, hrs); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, stackId); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); Set<ServiceRequest> serviceRequests = new HashSet<>(); serviceRequests.add(new ServiceRequest(clusterName, serviceGroupName, "HDFS", repositoryVersion201.getId(), null, null)); @@ -8201,24 +8193,24 @@ public void testDeleteClusterCreateHost() throws Exception { ServiceResourceProviderTest.createServices(amc, repositoryVersionDAO, serviceRequests); Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<>(); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "DATANODE", "DATANODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", "HISTORYSERVER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", "RESOURCEMANAGER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "YARN", "NODEMANAGER", "NODEMANAGER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null)); + serviceComponentRequests.add(new
ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "DATANODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "YARN", "NODEMANAGER", null)); + serviceComponentRequests.add(new ServiceComponentRequest(clusterName, serviceGroupName, "HDFS", "HDFS_CLIENT", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<>(); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "DATANODE", "DATANODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", "HISTORYSERVER", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", "RESOURCEMANAGER", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "YARN", "NODEMANAGER", "NODEMANAGER", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "DATANODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "YARN", "RESOURCEMANAGER", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "YARN", "NODEMANAGER", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(clusterName, serviceGroupName, "HDFS", "HDFS_CLIENT", host1, null)); amc.createHostComponents(componentHostRequests); @@ -8266,12 +8258,11 @@ public void testDisableAndDeleteStates() throws Exception { host = clusters.getHost("host3"); setOsFamily(host, "redhat", "5.9"); - String version = "HDP-1.2.0"; - ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, version, null); + ClusterRequest clusterRequest = new ClusterRequest(null, cluster1, "HDP-1.2.0", null); amc.createCluster(clusterRequest); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, version); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1,
serviceGroupName); Set<ServiceRequest> serviceRequests = new HashSet<>(); String serviceName = "HDFS"; @@ -8302,10 +8293,10 @@ public void testDisableAndDeleteStates() throws Exception { ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<>(); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "NAMENODE", "NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "DATANODE", "DATANODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "HDFS_CLIENT", "HDFS_CLIENT", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "SECONDARY_NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "DATANODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "HDFS_CLIENT", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); @@ -8317,11 +8308,11 @@ public void testDisableAndDeleteStates() throws Exception { HostResourceProviderTest.createHosts(amc, hostRequests); Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<>(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", host2, null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", host3, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "SECONDARY_NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", host2, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", host3, null)); amc.createHostComponents(componentHostRequests); @@ -8339,50 +8330,50 @@ public void testDisableAndDeleteStates() throws Exception { Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName).getServiceComponent("DATANODE").getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); - cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), version)); + cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(),
System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService(serviceName).getServiceComponent("NAMENODE").getServiceComponentHosts(); for (Map.Entry entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); - cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), version)); + cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService(serviceName).getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts(); for (Map.Entry entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); - cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), version)); + cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, "DISABLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, "DISABLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.DISABLED, componentHost.getState()); componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, "INSTALLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.INSTALLED, componentHost.getState()); componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, "DISABLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, "DISABLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.DISABLED, componentHost.getState()); componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host2, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host2, null)); amc.createHostComponents(componentHostRequests); componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host2, "INSTALLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host2, "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); @@ -8390,7 +8381,7 @@ public void 
testDisableAndDeleteStates() throws Exception { Assert.assertEquals(2, namenodes.size()); componentHost = namenodes.get(host2); - componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), version)); + componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis())); serviceRequests.clear(); @@ -8403,7 +8394,7 @@ public void testDisableAndDeleteStates() throws Exception { } componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, null)); amc.deleteHostComponents(componentHostRequests); @@ -8416,17 +8407,17 @@ public void testDisableAndDeleteStates() throws Exception { // should be able to add the host component back componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, null)); amc.createHostComponents(componentHostRequests); namenodes = cluster.getService(serviceName).getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(2, namenodes.size()); // make INSTALLED again componentHost = namenodes.get(host1); - componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), version)); + componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis())); componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", host1, "INSTALLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", host1, "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); assertEquals(State.INSTALLED, namenodes.get(host1).getState()); @@ -8442,7 +8433,7 @@ public void testDisableAndDeleteStates() throws Exception { // make disabled componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", host2, "DISABLED")); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", host2, "DISABLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, false); org.junit.Assert.assertEquals(State.DISABLED, sch.getState()); @@ -8470,7 +8461,7 @@ public void testDisableAndDeleteStates() throws Exception { // confirm delete componentHostRequests.clear(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", host2, null)); + componentHostRequests.add(new 
ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", host2, null)); amc.deleteHostComponents(componentHostRequests); sch = null; @@ -8523,19 +8514,19 @@ public void testDisableAndDeleteStates() throws Exception { ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); //Create service components serviceComponentRequests = new HashSet<>(); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "NAMENODE", "NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "DATANODE", "DATANODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "HDFS_CLIENT", "HDFS_CLIENT", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "SECONDARY_NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "DATANODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(cluster1, serviceGroupName, serviceName, "HDFS_CLIENT", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); //Create ServiceComponentHosts componentHostRequests = new HashSet<>(); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", "host1", null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "NAMENODE", "host1", null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", host1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", "host2", null)); - componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "DATANODE", "host3", null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "host1", null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "NAMENODE", "host1", null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "SECONDARY_NAMENODE", host1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "host2", null)); + componentHostRequests.add(new ServiceComponentHostRequest(cluster1, serviceGroupName, null, "DATANODE", "host3", null)); amc.createHostComponents(componentHostRequests); @@ -8574,7 +8565,7 @@ public void testScheduleSmokeTest() throws Exception { amc.createCluster(clusterRequest); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, CLUSTER_NAME, serviceGroupName, STACK_ID); + ServiceGroupResourceProviderTest.createServiceGroup(controller, CLUSTER_NAME, serviceGroupName); Set serviceRequests = new HashSet<>(); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, serviceGroupName, "HDFS", repositoryVersion201.getId(), null, null)); @@ -8584,14 +8575,12 @@ public void testScheduleSmokeTest() throws
Exception { ServiceResourceProviderTest.createServices(amc, repositoryVersionDAO, serviceRequests); Set serviceComponentRequests = new HashSet<>(); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "DATANODE", "DATANODE", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "HDFS_CLIENT", "HDFS_CLIENT", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", "HISTORYSERVER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "YARN", "RESOURCEMANAGER", "RESOURCEMANAGER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "YARN", "NODEMANAGER", "NODEMANAGER", null)); - serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "YARN", "YARN_CLIENT", "YARN_CLIENT", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "DATANODE", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "YARN", "RESOURCEMANAGER", null)); + serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, serviceGroupName, "YARN", "NODEMANAGER", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); @@ -8601,14 +8590,12 @@ public void testScheduleSmokeTest() throws Exception { HostResourceProviderTest.createHosts(amc, hostRequests); Set componentHostRequests = new HashSet<>(); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "DATANODE", "DATANODE", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "NAMENODE", "NAMENODE", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "HDFS_CLIENT", "HDFS_CLIENT", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "HDFS", "SECONDARY_NAMENODE", "SECONDARY_NAMENODE", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "MAPREDUCE2", "HISTORYSERVER", "HISTORYSERVER", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "YARN", "RESOURCEMANAGER", "RESOURCEMANAGER", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, "YARN", "NODEMANAGER", "NODEMANAGER", HOST1, null)); - componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "YARN_CLIENT", "YARN_CLIENT", HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "DATANODE", 
HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "NAMENODE", HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "SECONDARY_NAMENODE", HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "HISTORYSERVER", HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "RESOURCEMANAGER", HOST1, null)); + componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, serviceGroupName, null, "NODEMANAGER", HOST1, null)); amc.createHostComponents(componentHostRequests); @@ -8687,17 +8674,9 @@ public void testGetServices2() throws Exception { expect(clusters.getCluster("cluster1")).andReturn(cluster); expect(cluster.getService("service1")).andReturn(service); - HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class); - ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - expect(service.convertToResponse()).andReturn(response); - - // replay mocks - replay(maintHelper, injector, clusters, cluster, service, response, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(maintHelper, injector, clusters, cluster, service, response); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -8708,7 +8687,7 @@ public void testGetServices2() throws Exception { assertEquals(1, setResponses.size()); assertTrue(setResponses.contains(response)); - verify(injector, clusters, cluster, service, response, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, service, response); } /** @@ -8739,14 +8718,8 @@ public void testGetServices___ServiceNotFoundException() throws Exception { expect(clusters.getCluster("cluster1")).andReturn(cluster); expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("cluster1", "service1")); - HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class); - ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // replay mocks - replay(maintHelper, injector, clusters, cluster, hostComponentStateDAO, serviceComponentDesiredStateDAO); + replay(maintHelper, injector, clusters, cluster); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -8760,7 +8733,7 @@ public void testGetServices___ServiceNotFoundException() throws Exception { } assertSame(controller, controllerCapture.getValue()); - verify(injector, clusters, cluster, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster); } /** @@ -8809,17 +8782,9 @@ public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exc expect(service1.convertToResponse()).andReturn(response);
expect(service2.convertToResponse()).andReturn(response2); - - HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class); - ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class); - - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - - // replay mocks replay(maintHelper, injector, clusters, cluster, service1, service2, - response, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO); + response, response2); //test AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector); @@ -8831,7 +8796,7 @@ public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exc assertTrue(setResponses.contains(response)); assertTrue(setResponses.contains(response2)); - verify(injector, clusters, cluster, service1, service2, response, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO); + verify(injector, clusters, cluster, service1, service2, response, response2); } private void testRunSmokeTestFlag(String serviceGroupName, String serviceName, @@ -8968,7 +8933,7 @@ public void setMonitoringServicesRestartRequired() throws Exception { cluster.setCurrentStackVersion(stackId); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, stackId.getStackId()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String hdfsService = "HDFS"; String fakeMonitoringService = "FAKENAGIOS"; createService(cluster1, serviceGroupName, hdfsService, repositoryVersion208, null); @@ -9020,7 +8985,7 @@ public void setRestartRequiredAfterChangeService() throws Exception { cluster.setCurrentStackVersion(stackId); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, stackId.getStackId()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String hdfsService = "HDFS"; String zookeeperService = "ZOOKEEPER"; createService(cluster1, serviceGroupName, hdfsService, repositoryVersion207, null); @@ -9082,7 +9047,7 @@ public void testRestartIndicatorsAndSlaveFilesUpdateAtComponentsDelete() throws cluster.setCurrentStackVersion(stackId); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, stackId.getStackId()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String hdfsService = "HDFS"; String zookeeperService = "ZOOKEEPER"; createService(cluster1, serviceGroupName, hdfsService, null); @@ -9150,7 +9115,7 @@ public void testMaintenanceState() throws Exception { new StackId("HDP-1.2.0")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-1.2.0"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); @@ -9371,7 +9336,7 @@ public void testCredentialStoreRelatedAPICallsToUpdateSettings() throws Exceptio new StackId("HDP-2.2.0")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, 
serviceGroupName, "HDP-2.2.0"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String service1Name = "HDFS"; String service2Name = "STORM"; String service3Name = "ZOOKEEPER"; @@ -9453,10 +9418,10 @@ public void testPassiveSkipServices() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); clusters.getCluster(cluster1) - .setDesiredStackVersion(new StackId(HDP_0_1)); + .setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName1 = "HDFS"; String serviceName2 = "MAPREDUCE"; createService(cluster1, serviceGroupName, serviceName1, null); @@ -9595,7 +9560,7 @@ public void testEmptyConfigs() throws Exception { String cluster1 = getUniqueName(); createCluster(cluster1); Cluster cluster = clusters.getCluster(cluster1); - cluster.setDesiredStackVersion(new StackId(HDP_0_1)); + cluster.setDesiredStackVersion(new StackId("HDP-0.1")); ClusterRequest cr = new ClusterRequest(cluster.getClusterId(), cluster.getClusterName(), null, null); @@ -9767,7 +9732,7 @@ public void testConfigAttributesStaleConfigFilter() throws Exception { Long clusterId = c.getClusterId(); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -9825,30 +9790,30 @@ public void testConfigAttributesStaleConfigFilter() throws Exception { s1.getServiceComponent(componentName2).getServiceComponentHost(host2).updateActualConfigs(actualConfigOld); s1.getServiceComponent(componentName3).getServiceComponentHost(host2).updateActualConfigs(actualConfig); - ServiceComponentHostRequest r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(5, resps.size()); // Get all host components with stale config = true - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setStaleConfig("true"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); // Get all host components with stale config = false - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, null, null); r.setStaleConfig("false"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(3, resps.size()); // Get all host components with stale config = false and hostname filter - r = new ServiceComponentHostRequest(cluster1, null, null, null, null, host1, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, host1, null); r.setStaleConfig("false"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(2, resps.size()); // Get all host components with stale config = false and hostname filter - r = new 
ServiceComponentHostRequest(cluster1, null, null, null, null, host2, null); + r = new ServiceComponentHostRequest(cluster1, null, null, null, host2, null); r.setStaleConfig("true"); resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); @@ -9866,7 +9831,7 @@ public void testSecretReferences() throws Exception { Long clusterId = cl.getClusterId(); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -10011,7 +9976,7 @@ public void testTargetedProcessCommand() throws Exception { Cluster cluster = setupClusterWithHosts(cluster1, "HDP-2.0.5", Arrays.asList(host1), "centos5"); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HDFS"; createService(cluster1, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -10043,7 +10008,7 @@ public void testTargetedProcessCommand() throws Exception { startService(cluster1, serviceGroupName, serviceName, false, false); ServiceComponentHostRequest req = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName, - componentName1, componentName1, host1, "INSTALLED"); + componentName1, host1, "INSTALLED"); Map requestProperties = new HashMap<>(); requestProperties.put("namenode", "p1"); @@ -10110,7 +10075,7 @@ public void testServiceWidgetCreationOnServiceCreate() throws Exception { State.INSTALLED.name(), SecurityType.NONE, "OTHER-2.0", null); controller.createCluster(r); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "OTHER-2.0"); + ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName); String serviceName = "HBASE"; clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("OTHER-2.0")); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java index d192c43867e..38d46edb44d 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java @@ -183,7 +183,7 @@ private void createClusterFixture() throws AmbariException, AuthorizationExcepti clusters.getCluster(clusterName); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, STACK_ID.getStackId()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); createService(clusterName, serviceGroupName, "HDFS", null); createServiceComponent(clusterName, serviceGroupName, "HDFS","NAMENODE", State.INIT); @@ -224,13 +224,13 @@ private void createService(String clusterName, String serviceGroupName, String s private void createServiceComponent(String clusterName, String serviceGroupName, String serviceName, String componentName, State desiredState) throws 
AmbariException, AuthorizationException { - ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, componentName, desiredState != null ? desiredState.name() : null); + ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, desiredState != null ? desiredState.name() : null); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } private void createServiceComponentHost(String clusterName, String serviceGroupName, String serviceName, String componentName, String hostname, State desiredState) throws AmbariException, AuthorizationException { - ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, componentName, hostname, desiredState != null ? desiredState.name() : null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, hostname, desiredState != null ? desiredState.name() : null); controller.createHostComponents(Collections.singleton(r)); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java index d705d6a4b44..3f269653977 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java @@ -106,7 +106,7 @@ public void testRMRequiresRestart() throws AmbariException, AuthorizationExcepti controller.updateClusters(Collections.singleton(cr) , null); - ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null, null, null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null, null); r.setStaleConfig("true"); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(1, resps.size()); @@ -127,7 +127,7 @@ public void testAllRequiresRestart() throws AmbariException, AuthorizationExcept controller.updateClusters(Collections.singleton(cr) , null); - ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null, null, null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null, null); r.setStaleConfig("true"); Set resps = controller.getHostComponents(Collections.singleton(r)); Assert.assertEquals(4, resps.size()); @@ -167,7 +167,7 @@ private void createClusterFixture(String stackName) throws AmbariException, Auth addHost("c6402", clusterName); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, stackName); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); String serviceName = "YARN"; createService(clusterName, serviceGroupName, serviceName, null); @@ -215,13 +215,13 @@ private void createService(String clusterName, String serviceGroupName, private void createServiceComponent(String clusterName, String serviceGroupName, String serviceName, String componentName, State desiredState) throws AmbariException, AuthorizationException { - ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, 
componentName, desiredState != null ? desiredState.name() : null); + ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, desiredState != null ? desiredState.name() : null); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } private void createServiceComponentHost(String clusterName, String serviceGroupName, String serviceName, String componentName, String hostname, State desiredState) throws AmbariException, AuthorizationException { - ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, componentName, hostname, desiredState != null ? desiredState.name() : null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, hostname, desiredState != null ? desiredState.name() : null); controller.createHostComponents(Collections.singleton(r)); //set actual config diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java new file mode 100644 index 00000000000..341e889fdad --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java @@ -0,0 +1,550 @@ +package org.apache.ambari.server.controller.internal; + +import static org.junit.Assert.assertEquals; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.state.DependencyInfo; +import org.apache.ambari.server.state.StackInfo; +import org.junit.Test; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +@SuppressWarnings("unchecked") +public class BaseBlueprintProcessorTest { + + //todo: Move these tests to the correct location. + //todo: BaseBlueprintProcessor no longer exists.
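+ // Note: the tests below build their fixtures through the TestDependencyInfo helper defined at the bottom of this file, which hands a "SERVICE/COMPONENT" string to DependencyInfo.setName(); the assertions rely on getComponentName() resolving to the part after the slash (e.g. "HCAT" for "HIVE/HCAT").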
+ @Test + public void testStackRegisterConditionalDependencies() throws Exception { + StackInfo stackInfo = new StackInfo(); + + // test dependencies + final DependencyInfo hCatDependency = new TestDependencyInfo("HIVE/HCAT"); + final DependencyInfo yarnClientDependency = new TestDependencyInfo( + "YARN/YARN_CLIENT"); + final DependencyInfo tezClientDependency = new TestDependencyInfo( + "TEZ/TEZ_CLIENT"); + final DependencyInfo mapReduceTwoClientDependency = new TestDependencyInfo( + "YARN/MAPREDUCE2_CLIENT"); + final DependencyInfo oozieClientDependency = new TestDependencyInfo( + "OOZIE/OOZIE_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(stackInfo) { + @Override + public Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(hCatDependency); + setOfDependencies.add(yarnClientDependency); + setOfDependencies.add(tezClientDependency); + setOfDependencies.add(mapReduceTwoClientDependency); + setOfDependencies.add(oozieClientDependency); + + return setOfDependencies; + } + + return Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { + dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if (dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 5, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals("Incorrect service dependency for HCAT", "HIVE", + testStack.getDependencyConditionalServiceMap().get(hCatDependency)); + assertEquals( + "Incorrect service dependency for YARN_CLIENT", + "YARN", + testStack.getDependencyConditionalServiceMap().get(yarnClientDependency)); + assertEquals("Incorrect service dependency for TEZ_CLIENT", "TEZ", + testStack.getDependencyConditionalServiceMap().get(tezClientDependency)); + assertEquals( + "Incorrect service dependency for MAPREDUCE2_CLIENT", + "MAPREDUCE2", + testStack.getDependencyConditionalServiceMap().get( + mapReduceTwoClientDependency)); + assertEquals( + "Incorrect service dependency for OOZIE_CLIENT", + "OOZIE", + testStack.getDependencyConditionalServiceMap().get( + oozieClientDependency)); + } + + @Test + public void testStackRegisterConditionalDependenciesNoHCAT() throws Exception { + // test dependencies + final DependencyInfo yarnClientDependency 
= new TestDependencyInfo( + "YARN/YARN_CLIENT"); + final DependencyInfo tezClientDependency = new TestDependencyInfo( + "TEZ/TEZ_CLIENT"); + final DependencyInfo mapReduceTwoClientDependency = new TestDependencyInfo( + "YARN/MAPREDUCE2_CLIENT"); + final DependencyInfo oozieClientDependency = new TestDependencyInfo( + "OOZIE/OOZIE_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(new StackInfo()) { + @Override + public Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(yarnClientDependency); + setOfDependencies.add(tezClientDependency); + setOfDependencies.add(mapReduceTwoClientDependency); + setOfDependencies.add(oozieClientDependency); + + return setOfDependencies; + } + + return Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { + dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if (dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 4, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals( + "Incorrect service dependency for YARN_CLIENT", + "YARN", + testStack.getDependencyConditionalServiceMap().get(yarnClientDependency)); + assertEquals("Incorrect service dependency for TEZ_CLIENT", "TEZ", + testStack.getDependencyConditionalServiceMap().get(tezClientDependency)); + assertEquals( + "Incorrect service dependency for MAPREDUCE2_CLIENT", + "MAPREDUCE2", + testStack.getDependencyConditionalServiceMap().get( + mapReduceTwoClientDependency)); + assertEquals( + "Incorrect service dependency for OOZIE_CLIENT", + "OOZIE", + testStack.getDependencyConditionalServiceMap().get( + oozieClientDependency)); + } + + @Test + public void testStackRegisterConditionalDependenciesNoYarnClient() + throws Exception { + // test dependencies + final DependencyInfo hCatDependency = new TestDependencyInfo("HIVE/HCAT"); + final DependencyInfo tezClientDependency = new TestDependencyInfo( + "TEZ/TEZ_CLIENT"); + final DependencyInfo mapReduceTwoClientDependency = new TestDependencyInfo( + "YARN/MAPREDUCE2_CLIENT"); + final DependencyInfo oozieClientDependency = new TestDependencyInfo( + "OOZIE/OOZIE_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(new StackInfo()) { + @Override + public 
Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(hCatDependency); + setOfDependencies.add(tezClientDependency); + setOfDependencies.add(mapReduceTwoClientDependency); + setOfDependencies.add(oozieClientDependency); + + return setOfDependencies; + } + + return Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { + dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if (dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 4, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals("Incorrect service dependency for HCAT", "HIVE", + testStack.getDependencyConditionalServiceMap().get(hCatDependency)); + assertEquals("Incorrect service dependency for TEZ_CLIENT", "TEZ", + testStack.getDependencyConditionalServiceMap().get(tezClientDependency)); + assertEquals( + "Incorrect service dependency for MAPREDUCE2_CLIENT", + "MAPREDUCE2", + testStack.getDependencyConditionalServiceMap().get( + mapReduceTwoClientDependency)); + assertEquals( + "Incorrect service dependency for OOZIE_CLIENT", + "OOZIE", + testStack.getDependencyConditionalServiceMap().get( + oozieClientDependency)); + } + + @Test + public void testStackRegisterConditionalDependenciesNoTezClient() + throws Exception { + // test dependencies + final DependencyInfo hCatDependency = new TestDependencyInfo("HIVE/HCAT"); + final DependencyInfo yarnClientDependency = new TestDependencyInfo( + "YARN/YARN_CLIENT"); + final DependencyInfo mapReduceTwoClientDependency = new TestDependencyInfo( + "YARN/MAPREDUCE2_CLIENT"); + final DependencyInfo oozieClientDependency = new TestDependencyInfo( + "OOZIE/OOZIE_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(new StackInfo()) { + @Override + public Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(hCatDependency); + setOfDependencies.add(yarnClientDependency); + setOfDependencies.add(mapReduceTwoClientDependency); + setOfDependencies.add(oozieClientDependency); + + return setOfDependencies; + } + + return 
Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { + dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if (dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 4, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals("Incorrect service dependency for HCAT", "HIVE", + testStack.getDependencyConditionalServiceMap().get(hCatDependency)); + assertEquals( + "Incorrect service dependency for YARN_CLIENT", + "YARN", + testStack.getDependencyConditionalServiceMap().get(yarnClientDependency)); + assertEquals( + "Incorrect service dependency for MAPREDUCE2_CLIENT", + "MAPREDUCE2", + testStack.getDependencyConditionalServiceMap().get( + mapReduceTwoClientDependency)); + assertEquals( + "Incorrect service dependency for OOZIE_CLIENT", + "OOZIE", + testStack.getDependencyConditionalServiceMap().get( + oozieClientDependency)); + } + + @Test + public void testStackRegisterConditionalDependenciesNoMapReduceClient() + throws Exception { + // test dependencies + final DependencyInfo hCatDependency = new TestDependencyInfo("HIVE/HCAT"); + final DependencyInfo yarnClientDependency = new TestDependencyInfo( + "YARN/YARN_CLIENT"); + final DependencyInfo tezClientDependency = new TestDependencyInfo( + "TEZ/TEZ_CLIENT"); + final DependencyInfo oozieClientDependency = new TestDependencyInfo( + "OOZIE/OOZIE_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(new StackInfo()) { + @Override + public Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(hCatDependency); + setOfDependencies.add(yarnClientDependency); + setOfDependencies.add(tezClientDependency); + setOfDependencies.add(oozieClientDependency); + + return setOfDependencies; + } + + return Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { 
+ dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if (dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 4, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals("Incorrect service dependency for HCAT", "HIVE", + testStack.getDependencyConditionalServiceMap().get(hCatDependency)); + assertEquals( + "Incorrect service dependency for YARN_CLIENT", + "YARN", + testStack.getDependencyConditionalServiceMap().get(yarnClientDependency)); + assertEquals("Incorrect service dependency for TEZ_CLIENT", "TEZ", + testStack.getDependencyConditionalServiceMap().get(tezClientDependency)); + assertEquals( + "Incorrect service dependency for OOZIE_CLIENT", + "OOZIE", + testStack.getDependencyConditionalServiceMap().get( + oozieClientDependency)); + } + + @Test + public void testStackRegisterConditionalDependenciesNoOozieClient() + throws Exception { + // test dependencies + final DependencyInfo hCatDependency = new TestDependencyInfo("HIVE/HCAT"); + final DependencyInfo yarnClientDependency = new TestDependencyInfo( + "YARN/YARN_CLIENT"); + final DependencyInfo tezClientDependency = new TestDependencyInfo( + "TEZ/TEZ_CLIENT"); + final DependencyInfo mapReduceTwoClientDependency = new TestDependencyInfo( + "YARN/MAPREDUCE2_CLIENT"); + + // create stack for testing + Stack testStack = new Stack(new StackInfo()) { + @Override + public Collection getDependenciesForComponent( + String component) { + // simulate the dependencies in a given stack by overriding this method + if (component.equals("FAKE_MONITORING_SERVER")) { + Set setOfDependencies = new HashSet<>(); + + setOfDependencies.add(hCatDependency); + setOfDependencies.add(yarnClientDependency); + setOfDependencies.add(tezClientDependency); + setOfDependencies.add(mapReduceTwoClientDependency); + + return setOfDependencies; + } + + return Collections.emptySet(); + } + + /** + * {@inheritDoc} + */ + @Override + void registerConditionalDependencies() { + // TODO Auto-generated method stub + super.registerConditionalDependencies(); + + Map dependencyConditionalServiceMap = getDependencyConditionalServiceMap(); + Collection monitoringDependencies = getDependenciesForComponent("FAKE_MONITORING_SERVER"); + for (DependencyInfo dependency : monitoringDependencies) { + if (dependency.getComponentName().equals("HCAT")) { + dependencyConditionalServiceMap.put(dependency, "HIVE"); + } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "OOZIE"); + } else if (dependency.getComponentName().equals("YARN_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "YARN"); + } else if (dependency.getComponentName().equals("TEZ_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "TEZ"); + } else if 
(dependency.getComponentName().equals("MAPREDUCE2_CLIENT")) { + dependencyConditionalServiceMap.put(dependency, "MAPREDUCE2"); + } + } + } + + }; + + assertEquals("Initial conditional dependency map should be empty", 0, + testStack.getDependencyConditionalServiceMap().size()); + + testStack.registerConditionalDependencies(); + + assertEquals("Set of conditional service mappings is an incorrect size", 4, + testStack.getDependencyConditionalServiceMap().size()); + + assertEquals("Incorrect service dependency for HCAT", "HIVE", + testStack.getDependencyConditionalServiceMap().get(hCatDependency)); + assertEquals( + "Incorrect service dependency for YARN_CLIENT", + "YARN", + testStack.getDependencyConditionalServiceMap().get(yarnClientDependency)); + assertEquals("Incorrect service dependency for TEZ_CLIENT", "TEZ", + testStack.getDependencyConditionalServiceMap().get(tezClientDependency)); + assertEquals( + "Incorrect service dependency for MAPREDUCE2_CLIENT", + "MAPREDUCE2", + testStack.getDependencyConditionalServiceMap().get( + mapReduceTwoClientDependency)); + } + + /** + * Convenience class for easier setup/initialization of dependencies for unit + * testing. + */ + private static class TestDependencyInfo extends DependencyInfo { + TestDependencyInfo(String dependencyName) { + setName(dependencyName); + } + } +} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java index 84e3593b992..23283702dfa 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java @@ -18,13 +18,7 @@ package org.apache.ambari.server.controller.internal; -import static java.util.stream.Collectors.toList; -import static java.util.stream.Collectors.toSet; -import static org.apache.ambari.server.topology.ConfigRecommendationStrategy.ALWAYS_APPLY; -import static org.apache.ambari.server.topology.ConfigRecommendationStrategy.NEVER_APPLY; -import static org.apache.ambari.server.topology.ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY; import static org.easymock.EasyMock.anyObject; -import static org.easymock.EasyMock.anyString; import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.reset; @@ -62,7 +56,6 @@ import org.apache.ambari.server.topology.AdvisedConfiguration; import org.apache.ambari.server.topology.AmbariContext; import org.apache.ambari.server.topology.Blueprint; -import org.apache.ambari.server.topology.BlueprintBasedClusterProvisionRequest; import org.apache.ambari.server.topology.Cardinality; import org.apache.ambari.server.topology.ClusterTopology; import org.apache.ambari.server.topology.ClusterTopologyImpl; @@ -73,9 +66,7 @@ import org.apache.ambari.server.topology.HostGroupImpl; import org.apache.ambari.server.topology.HostGroupInfo; import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.topology.ResolvedComponent; -import org.apache.ambari.server.topology.SecurityConfiguration; -import org.apache.ambari.server.topology.SecurityConfigurationFactory; +import org.apache.ambari.server.topology.TopologyRequest; import org.apache.commons.lang.StringUtils; import org.easymock.EasyMock; import 
org.easymock.EasyMockRule; @@ -146,34 +137,33 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport { private Cluster cluster; @Mock - private ProvisionClusterRequest topologyRequestMock; + private TopologyRequest topologyRequestMock; @Mock(type = MockType.NICE) private ConfigHelper configHelper; - @Mock(type = MockType.NICE) - private SecurityConfigurationFactory securityFactory; - @Before public void init() throws Exception { - expect(ambariContext.composeStacks(anyObject())).andReturn(stack).anyTimes(); + expect(bp.getStack()).andReturn(stack).anyTimes(); expect(bp.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(bp.getName()).andReturn("test-bp").anyTimes(); expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce(); expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce(); // return false for all components since for this test we don't care about the value - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.emptyMap()).anyTimes(); - expect(serviceInfo.getRequiredProperties()).andReturn(Collections.emptyMap()).anyTimes(); + expect(serviceInfo.getRequiredProperties()).andReturn( + Collections.emptyMap()).anyTimes(); expect(serviceInfo.getRequiredServices()).andReturn(Collections.emptyList()).anyTimes(); setupGetServiceForComponentExpectations(); expect(stack.getCardinality("MYSQL_SERVER")).andReturn(new Cardinality("0-1")).anyTimes(); - expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(Collections.emptySet()).anyTimes(); + Set emptySet = Collections.emptySet(); + expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes(); expect(configHelper.getDefaultStackProperties( @@ -299,7 +289,7 @@ public void testDoUpdateForBlueprintExport_SingleHostProperty() throws Exception hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -347,14 +337,14 @@ public void testDoUpdateForBlueprintExport_FilterProperties() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); assertEquals(properties.size(), 3); - assertEquals((properties.get("kerberos-env")).size(), 0); - assertEquals((properties.get("krb5-conf")).size(), 0); - assertEquals((properties.get("tez-site")).size(), 0); + assertEquals(((Map) properties.get("kerberos-env")).size(), 0); + assertEquals(((Map) properties.get("krb5-conf")).size(), 0); + assertEquals(((Map) properties.get("tez-site")).size(), 0); } @Test @@ -381,7 +371,7 @@ public void testDoUpdateForBlueprintExportRangerHAPolicyMgrExternalUrlProperty() Collection hostGroups = ImmutableSet.of(group1, group2); - 
ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -425,7 +415,7 @@ public void testDoUpdateForBlueprintExport_SingleHostProperty_specifiedInParentC hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -473,7 +463,7 @@ public void testDoUpdateForBlueprintExport_SingleHostProperty_hostGroupConfigura hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -508,7 +498,7 @@ public void testDoUpdateForBlueprintExport_SingleHostProperty__withPort() throws hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -541,7 +531,7 @@ public void testDoUpdateForBlueprintExport_SingleHostProperty__ExternalReference hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -587,7 +577,7 @@ public void testDoUpdateForBlueprintExport_MultiHostProperty() throws Exception hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -634,7 +624,7 @@ public void testDoUpdateForBlueprintExport_MultiHostProperty__WithPorts() throws hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -689,7 +679,7 @@ public void testDoUpdateForBlueprintExport_MultiHostProperty__YAML() throws Exce hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); 
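// Reviewer note (annotation, not part of this patch): this hunk shows the pattern repeated
// throughout the file — the test helper loses its trailing ConfigRecommendationStrategy
// argument. A minimal sketch of the assumed before/after helper signatures, inferred only
// from the call sites in this diff:
//
//   // before this revert: strategy supplied per call site
//   private ClusterTopology createClusterTopology(Blueprint bp, Configuration config,
//       Collection<TestHostGroup> hostGroups, ConfigRecommendationStrategy strategy)
//       throws Exception { /* ... */ }
//
//   // after: three-argument form; tests that need a non-default strategy set it on the
//   // topology afterwards, e.g.
//   //   topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
//
// (see the ONLY_STACK_DEFAULTS_APPLY recommendation tests near the end of this file).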
configProcessor.doUpdateForBlueprintExport(); @@ -732,7 +722,7 @@ public void testDoUpdateForBlueprintExport_DBHostProperty() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -765,7 +755,7 @@ public void testDoUpdateForBlueprintExport_DBHostProperty__External() throws Exc hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -817,7 +807,7 @@ public void testDoUpdateForBlueprintExport_PasswordFilterApplied() throws Except hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -900,7 +890,7 @@ public void testFalconConfigExport() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -937,7 +927,7 @@ public void testTezConfigExport() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -949,6 +939,8 @@ public void testTezConfigExport() throws Exception { * There is no support currently for deploying a fully Kerberized * cluster with Blueprints. This test verifies the current treatment * of Kerberos-related properties in a Blueprint export. 
+ * + * @throws Exception */ @Test public void testKerberosConfigExport() throws Exception { @@ -986,7 +978,7 @@ public void testKerberosConfigExport() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1050,7 +1042,7 @@ public void testDoNameNodeHighAvailabilityExportWithHAEnabled() throws Exception Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1123,7 +1115,7 @@ public void testDoNameNodeHighAvailabilityExportWithHAEnabledPrimaryNamePreferen Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1183,7 +1175,7 @@ public void testDoNameNodeHighAvailabilityExportWithHAEnabledNameServiceProperti Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1219,7 +1211,7 @@ public void testDoNameNodeHighAvailabilityExportWithHANotEnabled() throws Except Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1282,7 +1274,7 @@ public void testDoNameNodeHighAvailabilityExportWithHAEnabledMultipleServices() Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1359,7 +1351,7 @@ public void testYarnConfigExported() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1425,7 +1417,7 @@ public void testYarnConfigExportedWithDefaultZeroHostAddress() 
throws Exception Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1501,7 +1493,7 @@ public void testHDFSConfigExported() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -1588,7 +1580,7 @@ public void testHiveConfigExported() throws Exception { hostGroups.add(group); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -1681,7 +1673,7 @@ public void testHiveConfigExportedMultipleHiveMetaStoreServers() throws Exceptio hostGroups.add(group); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -1777,7 +1769,7 @@ public void testOozieConfigExported() throws Exception { BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").remove("oozie.service.JPAService.jdbc.url"); } - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -1840,7 +1832,7 @@ public void testOozieJDBCPropertiesNotRemoved() throws Exception { expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology); assertTrue(BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url")); @@ -1879,7 +1871,7 @@ public void testOozieJDBCPropertyAddedToSingleHostMapDuringImport() throws Excep expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology); assertTrue(BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url")); @@ -1942,7 +1934,7 @@ public void 
testZookeeperConfigExported() throws Exception { hostGroups.add(group); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -2022,7 +2014,7 @@ public void testKnoxSecurityConfigExported() throws Exception { hostGroups.add(group); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -2071,7 +2063,7 @@ public void testKafkaConfigExported() throws Exception { hostGroups.add(group); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -2110,7 +2102,7 @@ public void testPropertyWithUndefinedHostisExported() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -2152,7 +2144,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue() thro hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); Set configTypesUpdated = @@ -2208,7 +2200,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue_provid hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2255,7 +2247,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue_hostGr hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2303,7 +2295,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue_BPHost hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); // todo: set as BP hostgroup 
topology.getHostGroupInfo().get("group2").getConfiguration().setParentConfiguration(group2BPConfig); @@ -2342,7 +2334,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__MissingComponent() expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); //todo: should throw a checked exception, not the exception expected by the api @@ -2385,7 +2377,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__MultipleMatchingHos expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("0-1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); try { @@ -2429,7 +2421,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__MultipleAppTimeline expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("0-1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2465,7 +2457,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__MissingOptionalComp expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("0-1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2497,7 +2489,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue__WithP hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); String updatedVal = topology.getConfiguration().getFullProperties().get("core-site").get("fs.defaultFS"); @@ -2542,7 +2534,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues() thro hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); String updatedVal = topology.getConfiguration().getFullProperties().get("hbase-site").get("hbase.zookeeper.quorum"); @@ -2599,7 +2591,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___with hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = 
createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); String updatedVal = topology.getConfiguration().getFullProperties().get("webhcat-site").get("templeton.zookeeper.hosts"); @@ -2650,7 +2642,7 @@ public void testMultipleHostTopologyUpdater__localhost__singleHost() throws Exce hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1); String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology); @@ -2684,7 +2676,7 @@ public void testMultipleHostTopologyUpdater__localhost__singleHostGroup() throws Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1); String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology); @@ -2719,7 +2711,7 @@ public void testMultipleHostTopologyUpdater__hostgroup__singleHostGroup() throws Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1); String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology); @@ -2739,7 +2731,7 @@ public void testDoUpdateForClusterVerifyRetrySettingsDefault() throws Exception Configuration clusterConfig = new Configuration(configProperties, Collections.emptyMap()); TestHostGroup testHostGroup = new TestHostGroup("test-host-group-one", Collections.emptySet(), Collections.emptySet()); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup), NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup)); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); @@ -2779,7 +2771,7 @@ public void testDoUpdateForClusterVerifyRetrySettingsCustomized() throws Excepti Configuration clusterConfig = new Configuration(configProperties, Collections.emptyMap()); TestHostGroup testHostGroup = new TestHostGroup("test-host-group-one", Collections.emptySet(), Collections.emptySet()); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup), NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup)); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); @@ -2867,7 +2859,7 @@ public void testDoUpdateForClusterWithNameNodeHAEnabledSpecifyingHostNamesDirect expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).anyTimes(); 
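// Reviewer note (annotation, not part of this patch): the Cardinality stubs are what let
// the configuration processor validate component/host counts without a live stack
// definition. The string forms below all come straight from call sites in this file:
//
//   new Cardinality("1")     // exactly one instance (e.g. SECONDARY_NAMENODE)
//   new Cardinality("1-2")   // one or two instances (e.g. NAMENODE with HA)
//   new Cardinality("1+")    // at least one instance (e.g. OOZIE_SERVER, HIVE_SERVER)
//   new Cardinality("0-1")   // optional single instance (e.g. APP_TIMELINE_SERVER)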
expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2942,7 +2934,7 @@ public void testHiveConfigClusterUpdateCustomValueSpecifyingHostNamesMetaStoreHA hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -2991,7 +2983,7 @@ public void testHiveConfigClusterUpdateSpecifyingHostNamesHiveServer2HA() throws expect(stack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3055,7 +3047,7 @@ public void testHiveConfigClusterUpdateUsingExportedNamesHiveServer2HA() throws expect(stack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3116,7 +3108,7 @@ private void testHiveMetastoreHA(String separator) throws InvalidTopologyExcepti hostGroups.add(new TestHostGroup("host_group_" + i, components, Collections.singleton(hostNames[i]))); } - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); String updatedValue = webHCatSiteProperties.get(propertyKey); @@ -3170,7 +3162,7 @@ public void testHiveInteractiveLlapZookeeperConfigExported() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForBlueprintExport(); @@ -3224,7 +3216,7 @@ public void testOozieConfigClusterUpdateHAEnabledSpecifyingHostNamesDirectly() t expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3290,7 +3282,7 @@ public void testOozieHAEnabledExport() throws Exception { expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes(); - 
ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForBlueprintExport(); @@ -3344,7 +3336,7 @@ public void testYarnHighAvailabilityConfigClusterUpdateSpecifyingHostNamesDirect expect(stack.getCardinality("RESOURCEMANAGER")).andReturn(new Cardinality("1-2")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3431,7 +3423,7 @@ public void testYarnHighAvailabilityExport() throws Exception { expect(stack.getCardinality("RESOURCEMANAGER")).andReturn(new Cardinality("1-2")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForBlueprintExport(); @@ -3514,7 +3506,7 @@ public void testHDFSConfigClusterUpdateQuorumJournalURLSpecifyingHostNamesDirect hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3563,7 +3555,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___YAML hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3614,7 +3606,7 @@ public void testDoUpdateForClusterCreate_Storm_Nimbus_HA_Enabled__defaultValues_ hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3664,7 +3656,7 @@ public void testDoUpdateForClusterCreate_Storm_Nimbus_HA_Enabled__FQDN_ValuesSpe hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3699,7 +3691,7 @@ public void testDoUpdateForClusterCreate_MProperty__defaultValues() throws Excep hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3731,7 
+3723,7 @@ public void testDoUpdateForClusterCreate_MProperty__missingM() throws Exception hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3763,7 +3755,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue() thr hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3795,7 +3787,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue_Using hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3827,7 +3819,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue_WithP hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3859,7 +3851,7 @@ public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue__With hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3905,7 +3897,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues() thr hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -3963,7 +3955,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___wit hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4021,7 +4013,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___wit hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4062,7 +4054,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty_exportedValues_withPo Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4118,7 +4110,7 @@ public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___YAM hostGroups.add(group3); hostGroups.add(group4); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4192,7 +4184,7 @@ public void testDoUpdateForClusterCreate_DBHostProperty__defaultValue() throws E hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4228,7 +4220,7 @@ public void testDoUpdateForClusterCreate_DBHostProperty__exportedValue() throws hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4261,7 +4253,7 @@ public void testDoUpdateForClusterCreate_DBHostProperty__external() throws Excep hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4276,17 +4268,17 @@ public void testExcludedPropertiesShouldBeAddedWhenServiceIsInBlueprint() throws // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); // customized stack calls for this test only expect(stack.getExcludedConfigurationTypes("FALCON")).andReturn(Collections.singleton("oozie-site")); expect(stack.getExcludedConfigurationTypes("OOZIE")).andReturn(Collections.emptySet()); expect(stack.getConfigurationProperties("FALCON", "oozie-site")).andReturn(Collections.singletonMap("oozie.service.ELService.ext.functions.coord-job-submit-instances", "testValue")).anyTimes(); expect(stack.getServiceForConfigType("oozie-site")).andReturn("OOZIE").anyTimes(); - expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON").anyTimes(); - 
expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE").anyTimes(); - expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE").anyTimes(); + expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON"); + expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON"); + expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE"); + expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE"); Map> properties = new HashMap<>(); Configuration clusterConfig = new Configuration(properties, Collections.emptyMap()); @@ -4304,7 +4296,7 @@ public void testExcludedPropertiesShouldBeAddedWhenServiceIsInBlueprint() throws Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4320,16 +4312,16 @@ public void testExcludedPropertiesShouldBeIgnoredWhenServiceIsNotInBlueprint() t // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); // customized stack calls for this test only expect(stack.getExcludedConfigurationTypes("FALCON")).andReturn(Collections.singleton("oozie-site")).anyTimes(); expect(stack.getConfigurationProperties("FALCON", "oozie-site")).andReturn(Collections.singletonMap("oozie.service.ELService.ext.functions.coord-job-submit-instances", "testValue")).anyTimes(); expect(stack.getServiceForConfigType("oozie-site")).andReturn("OOZIE").anyTimes(); - expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE").anyTimes(); - expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE").anyTimes(); + expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON"); + expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON"); + expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE"); + expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE"); Map> properties = new HashMap<>(); Configuration clusterConfig = new Configuration(properties, Collections.emptyMap()); @@ -4345,7 +4337,7 @@ public void testExcludedPropertiesShouldBeIgnoredWhenServiceIsNotInBlueprint() t Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4360,14 +4352,14 @@ public void testAddExcludedPropertiesAreOverwrittenByBlueprintConfigs() throws E // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) 
anyObject())).andReturn(false).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.emptyMap()).anyTimes(); // customized stack calls for this test only expect(stack.getExcludedConfigurationTypes("FALCON")).andReturn(Collections.singleton("oozie-site")).anyTimes(); expect(stack.getConfigurationProperties("FALCON", "oozie-site")).andReturn(Collections.singletonMap("oozie.service.ELService.ext.functions.coord-job-submit-instances", "testValue")).anyTimes(); - expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON").anyTimes(); + expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON"); + expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON"); Map> properties = new HashMap<>(); Map typeProps = new HashMap<>(); @@ -4387,7 +4379,7 @@ public void testAddExcludedPropertiesAreOverwrittenByBlueprintConfigs() throws E Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4403,7 +4395,7 @@ public void testExcludedPropertiesHandlingWhenExcludedConfigServiceIsNotFoundInS // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); // customized stack calls for this test only Set excludedConfigTypes = new HashSet<>(); @@ -4415,10 +4407,10 @@ public void testExcludedPropertiesHandlingWhenExcludedConfigServiceIsNotFoundInS expect(stack.getServiceForConfigType("oozie-site")).andReturn("OOZIE").anyTimes(); // simulate the case where the STORM service has been removed manually from the stack definitions expect(stack.getServiceForConfigType("storm-site")).andThrow(new IllegalArgumentException("TEST: Configuration not found in stack definitions!")); - expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON").anyTimes(); - expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE").anyTimes(); - expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE").anyTimes(); + expect(stack.getServiceForComponent("FALCON_CLIENT")).andReturn("FALCON"); + expect(stack.getServiceForComponent("FALCON_SERVER")).andReturn("FALCON"); + expect(stack.getServiceForComponent("OOZIE_CLIENT")).andReturn("OOZIE"); + expect(stack.getServiceForComponent("OOZIE_SERVER")).andReturn("OOZIE"); Map> properties = new HashMap<>(); Configuration clusterConfig = new Configuration(properties, Collections.emptyMap()); @@ -4436,7 +4428,7 @@ public void testExcludedPropertiesHandlingWhenExcludedConfigServiceIsNotFoundInS Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4474,7 +4466,7 @@ public void testFalconConfigClusterUpdate() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -4515,7 +4507,7 @@ public void testFalconConfigClusterUpdateDefaultConfig() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -4551,7 +4543,7 @@ public void testHiveConfigClusterUpdateCustomValue() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4607,7 +4599,7 @@ Set getDependsOnProperties() { // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -4629,7 +4621,7 @@ Set getDependsOnProperties() { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4686,7 +4678,7 @@ Set getDependsOnProperties() { // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -4708,7 +4700,7 @@ Set getDependsOnProperties() { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4766,7 +4758,7 @@ Set getDependsOnProperties() { // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); 
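// Reviewer note (annotation, not part of this patch): two EasyMock idioms recur in this
// revert. First, the explicit cast in
//
//   expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
//
// is presumably needed because a bare anyObject() matcher returns Object under the older
// matcher style this revert targets, so it no longer selects the String overload of
// isMasterComponent; anyObject(String.class) would be the type-safe alternative where the
// EasyMock version supports it. Second, dropping .anyTimes() from the
// getServiceForComponent(...) expectations makes them strict: an expect(...).andReturn(...)
// without a times() modifier defaults to exactly one call, so any extra or missing lookup
// now fails verification instead of being silently ignored.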
expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -4787,7 +4779,7 @@ Set getDependsOnProperties() { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4854,7 +4846,7 @@ Set getDependsOnProperties() { // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -4876,7 +4868,7 @@ Set getDependsOnProperties() { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4924,7 +4916,7 @@ Set getDependsOnProperties() { // defaults from init() method that we need expect(stack.getName()).andReturn("testStack").anyTimes(); expect(stack.getVersion()).andReturn("1").anyTimes(); - expect(stack.isMasterComponent(anyObject())).andReturn(false).anyTimes(); + expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -4945,7 +4937,7 @@ Set getDependsOnProperties() { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -4983,7 +4975,7 @@ public void testHiveConfigClusterUpdateDefaultValue() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -5036,7 +5028,7 @@ public void testAtlas() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level 
cluster config update method @@ -5100,7 +5092,7 @@ public void testHiveConfigClusterUpdateExportedHostGroupValue() throws Exception Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level cluster config update method @@ -5140,7 +5132,7 @@ public void testStormAndKafkaConfigClusterUpdateWithoutGangliaServer() throws Ex expect(stack.getCardinality("GANGLIA_SERVER")).andReturn(new Cardinality("1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5188,7 +5180,7 @@ public void testStormandKafkaConfigClusterUpdateWithGangliaServer() throws Excep Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5283,7 +5275,7 @@ public void testDoUpdateForClusterWithNameNodeHAEnabled() throws Exception { expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).anyTimes(); expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); Set updatedConfigTypes = @@ -5402,7 +5394,7 @@ public void testDoUpdateForClusterWithNameNodeHANotEnabled() throws Exception { expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).anyTimes(); expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); Set updatedConfigTypes = @@ -5471,7 +5463,7 @@ public void testDoUpdateForClusterWithNameNodeHAEnabledAndActiveNodeSet() throws Collection hostGroups = new ArrayList<>(); hostGroups.add(group); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -5618,7 +5610,7 @@ public void testHDFSConfigClusterUpdateQuorumJournalURL() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ 
-5660,7 +5652,7 @@ public void testHDFSConfigClusterUpdateQuorumJournalURL_UsingMinusSymbolInHostNa hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5711,7 +5703,7 @@ public void testHadoopHaNameNode() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -5751,7 +5743,7 @@ public void testGetRequiredHostGroups___validComponentCountOfZero() throws Excep hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5784,7 +5776,7 @@ public void testGetRequiredHostGroups___invalidComponentCountOfZero() throws Exc hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5819,7 +5811,7 @@ public void testGetRequiredHostGroups___multipleGroups() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); // call top-level export method @@ -5863,7 +5855,7 @@ public void testAllDefaultUserAndGroupProxyPropertiesSet() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -5908,7 +5900,7 @@ public void testRelevantDefaultUserAndGroupProxyPropertiesSet() throws Exception Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -5952,7 +5944,7 @@ public void testDefaultUserAndGroupProxyPropertiesSetWhenNotProvided() throws Ex Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor 
configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -5995,7 +5987,7 @@ public void testDefaultUserAndGroupProxyPropertiesSetWhenNotProvided2() throws E Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6039,7 +6031,7 @@ public void testHiveWithoutAtlas() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6130,7 +6122,7 @@ private void validateAtlasHivePropertiesForTestCase(Map hostGroups = Collections.singletonList(group1); - ClusterTopology topology1 = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology1 = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology1); configProcessor.doUpdateForClusterCreate(); @@ -6178,7 +6170,7 @@ public void testAtlasHivePropertiesWithHTTPS() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6210,7 +6202,7 @@ public void testStormAmsPropertiesDefault() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6241,7 +6233,7 @@ public void testStormAmsPropertiesUserDefinedReporter() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6272,7 +6264,7 @@ public void testKafkaAmsProperties() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6304,7 +6296,7 @@ public void testKafkaAmsPropertiesMultipleReporters() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = 
createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -6337,6 +6329,7 @@ public void testRecommendConfiguration_applyStackDefaultsOnly() throws Exception Collection hgComponents = new HashSet<>(); hgComponents.add("NAMENODE"); hgComponents.add("SECONDARY_NAMENODE"); + hgComponents.add("RESOURCEMANAGER"); TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost")); Collection hgComponents2 = new HashSet<>(); @@ -6354,15 +6347,15 @@ public void testRecommendConfiguration_applyStackDefaultsOnly() throws Exception Configuration clusterConfig = new Configuration(properties, Collections.emptyMap(), parentConfig); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, ONLY_STACK_DEFAULTS_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap()); + topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); reset(stack); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); - expect(stack.getServiceForComponent(anyString())).andReturn("HDFS").anyTimes(); - expect(stack.getConfiguration(ImmutableSet.of("HDFS"))).andReturn(createStackDefaults()).anyTimes(); + expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -6394,6 +6387,7 @@ public void testRecommendConfiguration_EmptyConfiguration_applyStackDefaultsOnly Collection hgComponents = new HashSet<>(); hgComponents.add("NAMENODE"); hgComponents.add("SECONDARY_NAMENODE"); + hgComponents.add("RESOURCEMANAGER"); TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost")); Collection hgComponents2 = new HashSet<>(); @@ -6413,15 +6407,15 @@ public void testRecommendConfiguration_EmptyConfiguration_applyStackDefaultsOnly Configuration clusterConfig = new Configuration(properties, Collections.emptyMap(), parentConfig); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, ONLY_STACK_DEFAULTS_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap()); + topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); reset(stack); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); - expect(stack.getServiceForComponent(anyString())).andReturn("HDFS").anyTimes(); - expect(stack.getConfiguration(ImmutableSet.of("HDFS"))).andReturn(createStackDefaults()).anyTimes(); + expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes(); Set emptySet = Collections.emptySet(); expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes(); @@ -6481,8 +6475,9 @@ 
public void testRecommendConfiguration_applyAlways() throws Exception { Configuration clusterConfig = new Configuration(properties, Collections.emptyMap(), parentClusterConfig); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, ALWAYS_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap()); + topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ALWAYS_APPLY); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // WHEN Set configTypes = configProcessor.doUpdateForClusterCreate(); @@ -6535,8 +6530,9 @@ public void testRecommendConfiguration_neverApply() throws Exception { Configuration clusterConfig = new Configuration(properties, Collections.emptyMap(), parentClusterConfig); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap()); + topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // WHEN configProcessor.doUpdateForClusterCreate(); @@ -6575,7 +6571,7 @@ public void testRangerAdminProperties() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6611,7 +6607,7 @@ public void testRangerAdminProperties_defaults() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6648,7 +6644,7 @@ public void testRangerAdminProperties_HA() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6703,7 +6699,7 @@ public void testRangerEnv_defaults() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6762,7 +6758,7 @@ public void testRangerEnv_defaults_NO_HDFS() throws Exception { Collection hostGroups = Lists.newArrayList(group1);//, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6828,7 +6824,7 @@ public void 
testRangerEnv() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6902,7 +6898,7 @@ public void testRangerEnvWithHdfsHA() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -6968,7 +6964,7 @@ public void testRangerEnvBlueprintExport() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7040,7 +7036,7 @@ public void testRangerEnvExportBlueprintWithHdfsHA() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7091,7 +7087,7 @@ public void testRangerKmsServerProperties() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7131,7 +7127,7 @@ public void testRangerKmsServerProperties_default() throws Exception { Collection hostGroups = Collections.singleton(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7176,7 +7172,7 @@ public void testHdfsWithRangerKmsServer() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7230,7 +7226,7 @@ public void testHdfsWithNoRangerKmsServer() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); @@ -7275,7 +7271,7 @@ public void testHdfsWithRangerKmsServer_default() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = 
createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7321,7 +7317,7 @@ public void testHdfsWithRangerKmsServer__multiple_hosts__localhost() throws Exce Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7375,7 +7371,7 @@ public void testHdfsWithRangerKmsServer__multiple_hosts__hostgroup() throws Exce Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7431,7 +7427,7 @@ public void testResolutionOfDRPCServerAndNN() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7475,7 +7471,7 @@ public void testHadoopWithRangerKmsServer() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7519,7 +7515,7 @@ public void testHadoopWithNoRangerKmsServer() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); @@ -7564,7 +7560,7 @@ public void testHadoopWithRangerKmsServer_default() throws Exception { Collection hostGroups = Lists.newArrayList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); // When @@ -7819,7 +7815,7 @@ public void testHawqConfigClusterUpdate() throws Exception { hostGroups.add(group2); hostGroups.add(group3); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -7851,7 +7847,7 @@ public void testHawqNonHaConfigClusterUpdate() throws Exception { Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + 
ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology); updater.doUpdateForClusterCreate(); @@ -7874,7 +7870,7 @@ public void testDoUpdateForBlueprintExport_NonTopologyProperty__AtlasClusterName Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); Long clusterId = topology.getClusterId(); Map typeProps = new HashMap<>(); typeProps.put("atlas.cluster.name", String.valueOf(clusterId)); @@ -7904,7 +7900,8 @@ public void testDoUpdateForBlueprintExport_NonTopologyProperty() throws Exceptio Collection hostGroups = new HashSet<>(); hostGroups.add(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); + Long clusterId = topology.getClusterId(); Map hiveSiteProps = new HashMap<>(); hiveSiteProps.put("hive.exec.post.hooks", someString); @@ -7941,8 +7938,8 @@ public void druidProperties() throws Exception { properties.put("druid-common", druidCommon); Map> parentProperties = new HashMap<>(); - Configuration parentClusterConfig = new Configuration(parentProperties, Collections.emptyMap()); - Configuration clusterConfig = new Configuration(properties, Collections.emptyMap(), parentClusterConfig); + Configuration parentClusterConfig = new Configuration(parentProperties, Collections.<String, Map<String, Map<String, String>>>emptyMap()); + Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap(), parentClusterConfig); Collection hgComponents1 = Sets.newHashSet("DRUID_COORDINATOR"); TestHostGroup group1 = new TestHostGroup("group1", hgComponents1, Collections.singleton("host1")); @@ -7952,7 +7949,7 @@ public void druidProperties() throws Exception { Collection hostGroups = Arrays.asList(group1, group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -7982,7 +7979,7 @@ public void testAmsPropertiesDefault() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -8012,7 +8009,7 @@ public void testAmsPropertiesSpecialAddress() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -8047,7 +8044,7 @@ public void testAmsPropertiesSpecialAddressMultipleCollectors() throws Exception hostGroups.add(group1); hostGroups.add(group2); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology
topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -8082,8 +8079,8 @@ public void testStackPasswordPropertyFilter() throws Exception { hostGroups.add(group1); hostGroups.add(group2); - expect(stack.isPasswordProperty(anyObject(), anyObject(), anyObject())).andReturn(true).once(); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + expect(stack.isPasswordProperty((String) anyObject(), (String) anyObject(), (String) anyObject())).andReturn(true).once(); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForBlueprintExport(); @@ -8162,7 +8159,7 @@ public void testValuesTrimming() throws Exception { Collection hostGroups = Collections.singletonList(group1); - ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, NEVER_APPLY); + ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups); BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology); configProcessor.doUpdateForClusterCreate(); @@ -8215,15 +8212,15 @@ private Configuration createStackDefaults() { } private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration configuration, - Collection hostGroups, ConfigRecommendationStrategy recommendation) + Collection hostGroups) throws InvalidTopologyException { replay(stack, serviceInfo, ambariContext, configHelper, controller, kerberosHelper, kerberosDescriptor, clusters, cluster); Map hostGroupInfo = new HashMap<>(); + Collection allServices = new HashSet<>(); Map allHostGroups = new HashMap<>(); - Map> resolvedComponents = new HashMap<>(); for (TestHostGroup hostGroup : hostGroups) { HostGroupInfo groupInfo = new HostGroupInfo(hostGroup.name); @@ -8231,47 +8228,45 @@ private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration //todo: HG configs groupInfo.setConfiguration(hostGroup.configuration); - Set components = hostGroup.components.stream() - .map(name -> ResolvedComponent.builder(new Component(name)).stackId(STACK_ID).serviceType(stack.getServiceForComponent(name)).buildPartial()) - .collect(toSet()); - - List componentList = components.stream() - .map(ResolvedComponent::component) - .collect(toList()); + List componentList = new ArrayList<>(); + for (String componentName : hostGroup.components) { + componentList.add(new Component(componentName)); + } //create host group which is set on topology - allHostGroups.put(hostGroup.name, new HostGroupImpl(hostGroup.name, componentList, EMPTY_CONFIG, "1")); + allHostGroups.put(hostGroup.name, new HostGroupImpl(hostGroup.name, "test-bp", stack, + componentList, EMPTY_CONFIG, "1")); + hostGroupInfo.put(hostGroup.name, groupInfo); - resolvedComponents.put(hostGroup.name, components); + + for (String component : hostGroup.components) { + for (Map.Entry> serviceComponentsEntry : serviceComponents.entrySet()) { + if (serviceComponentsEntry.getValue().contains(component)) { + allServices.add(serviceComponentsEntry.getKey()); + } + } + } } + expect(bp.getServices()).andReturn(allServices).anyTimes(); + for (HostGroup group : allHostGroups.values()) { expect(bp.getHostGroup(group.getName())).andReturn(group).anyTimes(); } 
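The restored createClusterTopology helper above no longer takes a ConfigRecommendationStrategy or pre-resolved components: it derives the blueprint's service set by reverse-looking-up each host-group component in the test's serviceComponents map, and it defaults the topology to NEVER_APPLY so individual tests override the strategy via topology.setConfigRecommendationStrategy(...). A minimal sketch of that reverse lookup, assuming a service-to-components fixture map like the test's (class and method names here are illustrative, not the test's actual fields):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class ServiceLookupSketch {
      // Mirrors the loop in the restored helper: a component belongs to the service
      // whose component list contains it, so placing NAMENODE in a host group
      // implies the HDFS service is part of the blueprint.
      static Set<String> servicesFor(Collection<String> hostGroupComponents,
                                     Map<String, Collection<String>> serviceComponents) {
        Set<String> services = new HashSet<>();
        for (String component : hostGroupComponents) {
          for (Map.Entry<String, Collection<String>> entry : serviceComponents.entrySet()) {
            if (entry.getValue().contains(component)) {
              services.add(entry.getKey());
            }
          }
        }
        return services;
      }

      public static void main(String[] args) {
        Map<String, Collection<String>> serviceComponents = new HashMap<>();
        serviceComponents.put("HDFS", Arrays.asList("NAMENODE", "DATANODE"));
        System.out.println(servicesFor(Arrays.asList("NAMENODE"), serviceComponents)); // prints [HDFS]
      }
    }
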
expect(bp.getHostGroups()).andReturn(allHostGroups).anyTimes(); - expect(topologyRequestMock.getBlueprint()).andReturn(blueprint).anyTimes(); expect(topologyRequestMock.getClusterId()).andReturn(1L).anyTimes(); - expect(topologyRequestMock.getConfigRecommendationStrategy()).andReturn(recommendation).anyTimes(); + expect(topologyRequestMock.getBlueprint()).andReturn(blueprint).anyTimes(); expect(topologyRequestMock.getConfiguration()).andReturn(configuration).anyTimes(); - expect(topologyRequestMock.getDefaultPassword()).andReturn("secret").anyTimes(); expect(topologyRequestMock.getHostGroupInfo()).andReturn(hostGroupInfo).anyTimes(); - expect(topologyRequestMock.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - expect(topologyRequestMock.getProvisionAction()).andReturn(ProvisionAction.INSTALL_AND_START).anyTimes(); - expect(topologyRequestMock.getSecurityConfiguration()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(topologyRequestMock.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(bp.getConfiguration()).andReturn(Configuration.createEmpty()).anyTimes(); - expect(bp.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - expect(bp.getSecurity()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(securityFactory.loadSecurityConfigurationByReference(anyString())).andReturn(null).anyTimes(); - replay(bp, topologyRequestMock, securityFactory); - - BlueprintBasedClusterProvisionRequest request = new BlueprintBasedClusterProvisionRequest(ambariContext, securityFactory, bp, topologyRequestMock); - - ClusterTopologyImpl clusterTopology = new ClusterTopologyImpl(ambariContext, request, resolvedComponents); - clusterTopology.setClusterId(1L); - return clusterTopology; + + replay(bp, topologyRequestMock); + + ClusterTopology topology = new ClusterTopologyImpl(ambariContext, topologyRequestMock); + topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY); + + return topology; } private class TestHostGroup { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java index aeadc5cb74a..dece002f9bb 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java @@ -75,10 +75,11 @@ import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.BlueprintFactory; +import org.apache.ambari.server.topology.BlueprintValidator; +import org.apache.ambari.server.topology.InvalidTopologyException; import org.apache.ambari.server.topology.SecurityConfiguration; import org.apache.ambari.server.topology.SecurityConfigurationFactory; import org.apache.ambari.server.topology.Setting; -import org.apache.ambari.server.topology.validators.BlueprintValidator; import org.apache.ambari.server.utils.StageUtils; import org.junit.Before; import org.junit.BeforeClass; @@ -89,6 +90,7 @@ /** * BlueprintResourceProvider unit tests. 
*/ +@SuppressWarnings("unchecked") public class BlueprintResourceProviderTest { private static String BLUEPRINT_NAME = "test-blueprint"; @@ -113,7 +115,7 @@ public static void initClass() { replay(resourceProviderFactory); } - private Map>> getSettingProperties() { + private Map>> getSettingProperties() { return new HashMap<>(); } @@ -131,12 +133,13 @@ public void testCreateResources() throws Exception { Set> setProperties = getBlueprintTestProperties(); Map requestInfoProperties = getTestRequestInfoProperties(); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); expect(securityFactory.createSecurityConfigurationFromRequest(null, true)).andReturn(null).anyTimes(); - blueprintValidator.validate(blueprint); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); @@ -205,13 +208,14 @@ public void testCreateResources_NoValidation() throws Exception { Request request = createMock(Request.class); Setting setting = createStrictMock(Setting.class); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); Set> setProperties = getBlueprintTestProperties(); Map requestInfoProperties = getTestRequestInfoProperties(); requestInfoProperties.put("validate_topology", "false"); // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); + blueprintValidator.validateRequiredProperties(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); @@ -243,7 +247,7 @@ public void testCreateResources_NoValidation() throws Exception { verify(dao, entity, blueprintFactory, blueprint, blueprintValidator, metaInfo, request, managementController); } - @Test(expected = IllegalArgumentException.class) + @Test public void testCreateResources_TopologyValidationFails() throws Exception { Request request = createMock(Request.class); @@ -253,8 +257,9 @@ public void testCreateResources_TopologyValidationFails() throws Exception { // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).atLeastOnce(); - blueprintValidator.validate(blueprint); - expectLastCall().andThrow(new IllegalArgumentException("test")).once(); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); + expectLastCall().andThrow(new InvalidTopologyException("test")).once(); expect(request.getProperties()).andReturn(setProperties); expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties); @@ -270,7 +275,14 @@ public void testCreateResources_TopologyValidationFails() throws Exception { AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); ((ObservableResourceProvider)provider).addObserver(observer); - provider.createResources(request); + try { + provider.createResources(request); + fail("Expected exception due to topology validation error"); + } catch 
(IllegalArgumentException e) { + // expected + } + + verify(dao, entity, blueprintFactory, blueprint, blueprintValidator, metaInfo, request); } @@ -283,11 +295,12 @@ public void testCreateResources_withConfiguration() throws Exception { Map requestInfoProperties = getTestRequestInfoProperties(); Request request = createMock(Request.class); Setting setting = createStrictMock(Setting.class); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); - blueprintValidator.validate(blueprint); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); @@ -319,8 +332,9 @@ public void testCreateResources_withConfiguration() throws Exception { verify(dao, entity, blueprintFactory, blueprint, blueprintValidator, metaInfo, request, managementController); } - @Test(expected = IllegalArgumentException.class) - public void testCreateResource_BlueprintFactoryThrowsException() throws Exception { + @Test + public void testCreateResource_BlueprintFactoryThrowsException() throws Exception + { Request request = createMock(Request.class); Set> setProperties = getBlueprintTestProperties(); @@ -338,7 +352,13 @@ public void testCreateResource_BlueprintFactoryThrowsException() throws Exceptio replay(dao, entity, metaInfo, blueprintFactory, securityFactory, blueprint, blueprintValidator, request); // end expectations - provider.createResources(request); + try { + provider.createResources(request); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + verify(dao, entity, blueprintFactory, blueprint, blueprintValidator, metaInfo, request); } @Test @@ -349,14 +369,15 @@ public void testCreateResources_withSecurityConfiguration() throws Exception { Set> setProperties = getBlueprintTestProperties(); Map requestInfoProperties = getTestRequestInfoProperties(); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); SecurityConfiguration securityConfiguration = new SecurityConfiguration(SecurityType.KERBEROS, "testRef", null); // set expectations expect(securityFactory.createSecurityConfigurationFromRequest(anyObject(), anyBoolean())).andReturn (securityConfiguration).once(); expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), securityConfiguration)).andReturn(blueprint).once(); - blueprintValidator.validate(blueprint); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); @@ -474,14 +495,15 @@ public void testCreateResources_withEmptyConfiguration() throws Exception { setConfigurationProperties(setProperties); AmbariManagementController managementController = createMock(AmbariManagementController.class); Map requestInfoProperties = new HashMap<>(); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, "{\"configurations\":[]}"); Request request = createMock(Request.class); 
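Several hunks above replace @Test(expected = IllegalArgumentException.class) with an explicit try/catch plus fail(): unlike the annotation form, the explicit form lets the test method keep running after the expected failure, so its EasyMock verify(...) calls still execute. A minimal self-contained sketch of the pattern, where the hypothetical thrower stands in for provider.createResources(request):

    import static org.junit.Assert.fail;

    import org.junit.Test;

    public class ExpectedFailureStyleSketchTest {
      // Hypothetical stand-in for provider.createResources(request).
      private void createResources() {
        throw new IllegalArgumentException("test");
      }

      @Test
      public void failureStillAllowsMockVerification() {
        try {
          createResources();
          fail("Expected exception due to topology validation error");
        } catch (IllegalArgumentException e) {
          // expected
        }
        // With @Test(expected = ...) the method would have ended at the throw;
        // here the test continues, so verify(dao, entity, ...) can still run.
      }
    }
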
Setting setting = createStrictMock(Setting.class); // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); - blueprintValidator.validate(blueprint); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); @@ -520,14 +542,15 @@ public void testCreateResources_withSingleConfigurationType() throws Exception { setConfigurationProperties(setProperties); AmbariManagementController managementController = createMock(AmbariManagementController.class); Map requestInfoProperties = new HashMap<>(); - Map>> settingProperties = getSettingProperties(); + Map>> settingProperties = getSettingProperties(); requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, "{\"configurations\":[{\"configuration-type\":{\"properties\":{\"property\":\"value\"}}}]}"); Request request = createMock(Request.class); Setting setting = createStrictMock(Setting.class); // set expectations expect(blueprintFactory.createBlueprint(setProperties.iterator().next(), null)).andReturn(blueprint).once(); - blueprintValidator.validate(blueprint); + blueprintValidator.validateRequiredProperties(blueprint); + blueprintValidator.validateTopology(blueprint); expect(blueprint.getSetting()).andReturn(setting).anyTimes(); expect(setting.getProperties()).andReturn(settingProperties).anyTimes(); expect(blueprint.toEntity()).andReturn(entity); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java index 2630ef746ff..a17e94fa26a 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java @@ -245,7 +245,7 @@ public void testGetResources() throws Exception { serviceOsSpecificHashMap.put("key", osSpecific); ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, clusterName, 1L, "CORE", 1L, serviceName, serviceName, 1L, - componentName, componentName, displayName, hostName, publicHostname, desiredState, "", null, null, null, + componentName, displayName, hostName, publicHostname, desiredState, "", null, null, null, null); Set responses = new LinkedHashSet<>(); @@ -323,6 +323,9 @@ public void testGetResources() throws Exception { expect(service.getServiceComponent(componentName)).andReturn(serviceComponent).atLeastOnce(); expect(serviceComponent.getDesiredStackId()).andReturn(stackId).atLeastOnce(); + HashMap rcaParams = new HashMap<>(); + rcaParams.put("key","value"); + expect(managementController.getRcaParameters()).andReturn(rcaParams).anyTimes(); expect(stackInfo.getOsSpecifics()).andReturn(new HashMap<>()).anyTimes(); Set userSet = new HashSet<>(); userSet.add("hdfs"); @@ -499,7 +502,7 @@ public void testGetResourcesFromCommonServices() throws Exception { serviceOsSpecificHashMap.put("key", osSpecific); ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, clusterName, 1L, "CORE", 1L, serviceName, serviceName, 1L, - componentName, componentName, displayName, hostName, publicHostName, desiredState, "", null, null, null, + componentName, 
displayName, hostName, publicHostName, desiredState, "", null, null, null, null); Set responses = new LinkedHashSet<>(); @@ -580,6 +583,7 @@ public void testGetResourcesFromCommonServices() throws Exception { HashMap rcaParams = new HashMap<>(); rcaParams.put("key","value"); + expect(managementController.getRcaParameters()).andReturn(rcaParams).anyTimes(); expect(serviceInfo.getOsSpecifics()).andReturn(new HashMap<>()).anyTimes(); expect(stackInfo.getOsSpecifics()).andReturn(new HashMap<>()).anyTimes(); Set userSet = new HashSet<>(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java index 2bf4d117b62..aa295319f75 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java @@ -941,7 +941,7 @@ public TestProviderModule() { providers.put(Resource.Type.Host, new TestHostResourceProvider()); providers.put(Resource.Type.Stack, new TestStackResourceProvider()); providers.put(Resource.Type.StackVersion, new TestStackVersionResourceProvider()); - providers.put(Resource.Type.OperatingSystemReadOnly, new TestOperatingSystemResourceProvider()); + providers.put(Resource.Type.OperatingSystem, new TestOperatingSystemResourceProvider()); providers.put(Resource.Type.Repository, new TestRepositoryResourceProvider()); providers.put(Resource.Type.RepositoryVersion, new TestRepositoryVersionResourceProvider()); providers.put(Resource.Type.CompatibleRepositoryVersion, new TestCompatibleRepositoryVersionResourceProvider()); @@ -1174,7 +1174,7 @@ public Set getResources(Request request, Predicate predicate) private static class TestOperatingSystemResourceProvider extends TestResourceProvider { private TestOperatingSystemResourceProvider() { - super(OperatingSystemReadOnlyResourceProvider.propertyIds, OperatingSystemReadOnlyResourceProvider.keyPropertyIds); + super(OperatingSystemResourceProvider.propertyIds, OperatingSystemResourceProvider.keyPropertyIds); } @Override @@ -1186,7 +1186,7 @@ public Set getResources(Request request, Predicate predicate) keyPropertyValues.add("centos6"); keyPropertyValues.add("oraclelinux5"); - return getResources(Resource.Type.OperatingSystemReadOnly, predicate, "OperatingSystems/os_type", keyPropertyValues); + return getResources(Resource.Type.OperatingSystem, predicate, "OperatingSystems/os_type", keyPropertyValues); } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java index 06d95bd660d..e68798334cb 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java @@ -20,7 +20,6 @@ import static org.easymock.EasyMock.anyBoolean; import static org.easymock.EasyMock.anyObject; -import static org.easymock.EasyMock.anyString; import static org.easymock.EasyMock.capture; import static org.easymock.EasyMock.createMock; import static org.easymock.EasyMock.createNiceMock; @@ -155,7 +154,7 @@ public void testCreateResource_blueprint_With_ProvisionAction() throws Exception 
expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn(null) .once(); - expect(topologyFactory.createProvisionClusterRequest("{}", properties, null)).andReturn(topologyRequest).once(); + expect(topologyFactory.createProvisionClusterRequest(properties, null)).andReturn(topologyRequest).once(); expect(topologyManager.provisionCluster(topologyRequest)).andReturn(requestStatusResponse).once(); expect(requestStatusResponse.getRequestId()).andReturn(5150L).anyTimes(); @@ -171,6 +170,33 @@ public void testCreateResource_blueprint_With_ProvisionAction() throws Exception verifyAll(); } + @Test(expected = IllegalArgumentException.class) + public void testCreateResource_blueprint_withInvalidSecurityConfiguration() throws Exception { + Set> requestProperties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME); + Map properties = requestProperties.iterator().next(); + Map requestInfoProperties = new HashMap<>(); + requestInfoProperties.put(Request.REQUEST_INFO_BODY_PROPERTY, "{\"security\" : {\n\"type\" : \"NONE\"," + + "\n\"kerberos_descriptor_reference\" : " + "\"testRef\"\n}}"); + SecurityConfiguration blueprintSecurityConfiguration = new SecurityConfiguration(SecurityType.KERBEROS, "testRef", + null); + SecurityConfiguration securityConfiguration = new SecurityConfiguration(SecurityType.NONE, null, null); + + // set expectations + expect(request.getProperties()).andReturn(requestProperties).anyTimes(); + expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties).anyTimes(); + + expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn + (securityConfiguration).once(); + expect(topologyFactory.createProvisionClusterRequest(properties, securityConfiguration)).andReturn(topologyRequest).once(); + expect(topologyRequest.getBlueprint()).andReturn(blueprint).anyTimes(); + expect(blueprint.getSecurity()).andReturn(blueprintSecurityConfiguration).anyTimes(); + expect(requestStatusResponse.getRequestId()).andReturn(5150L).anyTimes(); + + replayAll(); + SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator()); + RequestStatus requestStatus = provider.createResources(request); + } + @Test public void testCreateResource_blueprint_withSecurityConfiguration() throws Exception { Set> requestProperties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME); @@ -185,7 +211,7 @@ public void testCreateResource_blueprint_withSecurityConfiguration() throws Exce expect(request.getProperties()).andReturn(requestProperties).anyTimes(); expect(request.getRequestInfoProperties()).andReturn(requestInfoProperties).anyTimes(); - expect(topologyFactory.createProvisionClusterRequest(anyString(), eq(properties), eq(securityConfiguration))).andReturn(topologyRequest).once(); + expect(topologyFactory.createProvisionClusterRequest(properties, securityConfiguration)).andReturn(topologyRequest).once(); expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn (securityConfiguration).once(); expect(topologyManager.provisionCluster(topologyRequest)).andReturn(requestStatusResponse).once(); @@ -209,7 +235,7 @@ public void testCreateResource_blueprint__InvalidRequest() throws Exception { // set expectations expect(request.getProperties()).andReturn(requestProperties).anyTimes(); // throw exception from topology request factory an assert that the correct exception is thrown from resource provider - 
expect(topologyFactory.createProvisionClusterRequest(null, properties, null)).andThrow(new InvalidTopologyException + expect(topologyFactory.createProvisionClusterRequest(properties, null)).andThrow(new InvalidTopologyException ("test")); replayAll(); @@ -473,7 +499,7 @@ private void testCreateResource_blueprint(Authentication authentication) throws expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn(null) .once(); - expect(topologyFactory.createProvisionClusterRequest(anyString(), eq(properties), anyObject())).andReturn(topologyRequest).once(); + expect(topologyFactory.createProvisionClusterRequest(properties, null)).andReturn(topologyRequest).once(); expect(topologyManager.provisionCluster(topologyRequest)).andReturn(requestStatusResponse).once(); expect(requestStatusResponse.getRequestId()).andReturn(5150L).anyTimes(); @@ -804,7 +830,7 @@ public void testCreateResource_blueprint_withRepoVersion() throws Exception { expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn(null) .once(); - expect(topologyFactory.createProvisionClusterRequest("{}", properties, null)).andReturn(topologyRequest).once(); + expect(topologyFactory.createProvisionClusterRequest(properties, null)).andReturn(topologyRequest).once(); expect(topologyManager.provisionCluster(topologyRequest)).andReturn(requestStatusResponse).once(); expect(requestStatusResponse.getRequestId()).andReturn(5150L).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java index ba641650f98..642c97be046 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java @@ -61,8 +61,6 @@ import org.apache.ambari.server.controller.spi.SystemException; import org.apache.ambari.server.controller.utilities.PredicateBuilder; import org.apache.ambari.server.controller.utilities.PropertyHelper; -import org.apache.ambari.server.orm.dao.HostComponentStateDAO; -import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO; import org.apache.ambari.server.security.TestAuthenticationFactory; import org.apache.ambari.server.security.authorization.AuthorizationException; import org.apache.ambari.server.security.authorization.AuthorizationHelperInitializer; @@ -154,7 +152,7 @@ private void testCreateResources(Authentication authentication) throws Exception expect(componentInfo.isRecoveryEnabled()).andReturn(true).anyTimes(); expect(ambariMetaInfo.getComponent("HDP", "99", "Service100", "Component100")).andReturn(componentInfo).anyTimes(); - expect(serviceComponentFactory.createNew(service, "Component100", "Component100")).andReturn(serviceComponent); + expect(serviceComponentFactory.createNew(service, "Component100")).andReturn(serviceComponent); ServiceComponentResponse componentResponse = createNiceMock(ServiceComponentResponse.class); expect(serviceComponent.convertToResponse()).andReturn(componentResponse); @@ -253,13 +251,13 @@ private void testGetResources(Authentication authentication) throws Exception { expect(service.getServiceComponents()).andReturn(serviceComponentMap).anyTimes(); expect(serviceComponent1.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, 
"Cluster100", 1L, "CORE", 1L, "Service100", "Service100", 1L, "Component100", "Component100", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "CORE", 1L, "Service100", "Service100", "Component100", stackId, "", serviceComponentStateCountMap, true /* recovery enabled */, "Component100 Client", null, null)); expect(serviceComponent2.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "CORE", 1L, "Service100", "Service100", 2L, "Component101", "Component101", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "CORE", 1L, "Service100", "Service100", "Component101", stackId, "", serviceComponentStateCountMap, false /* recovery not enabled */, "Component101 Client", null, null)); expect(serviceComponent3.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "CORE", 1L, "Service100", "Service100", 3L, "Component102", "Component102", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "CORE", 1L, "Service100", "Service100", "Component102", stackId, "", serviceComponentStateCountMap, true /* recovery enabled */, "Component102 Client", "1.1", RepositoryVersionState.CURRENT)); expect(ambariMetaInfo.getComponent("FOO", "1.0", null, "Component100")).andReturn( @@ -433,13 +431,13 @@ private void testUpdateResources(Authentication authentication) throws Exception expect(component3Info.getCategory()).andReturn(null); expect(serviceComponent1.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component101", "Component101", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", "Component101", stackId, "", serviceComponentStateCountMap, false /* recovery not enabled */, "Component101 Client", null, null)); expect(serviceComponent2.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", 2L, "Component102", "Component102",stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", "Component102", stackId, "", serviceComponentStateCountMap, false /* recovery not enabled */, "Component102 Client", null, null)); expect(serviceComponent3.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", 3L, "Component103", "Component103", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", "Component103", stackId, "", serviceComponentStateCountMap, false /* recovery not enabled */, "Component103 Client", null, null)); expect(serviceComponent1.getDesiredState()).andReturn(State.INSTALLED).anyTimes(); expect(serviceComponent2.getDesiredState()).andReturn(State.INSTALLED).anyTimes(); @@ -742,7 +740,7 @@ private void testUpdateAutoStart(Authentication authentication) throws Exception expect(component1Info.getCategory()).andReturn(null); expect(serviceComponent1.convertToResponse()).andReturn( - new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component101", "Component101", stackId, "", serviceComponentStateCountMap, + new ServiceComponentResponse(100L, "Cluster100", 1L, "", 1L, "Service100", "", "Component101", stackId, "", serviceComponentStateCountMap, false /* recovery not enabled */, "Component101 
Client", null, null)); expect(serviceComponent1.getDesiredState()).andReturn(State.INSTALLED).anyTimes(); @@ -950,8 +948,6 @@ public void testGetComponents_ServiceComponentNotFoundException() throws Excepti MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class); Cluster cluster = createNiceMock(Cluster.class); Service service = createNiceMock(Service.class); - HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class); - ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class); // requests ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "CORE", "service1", "component1", @@ -967,9 +963,6 @@ public void testGetComponents_ServiceComponentNotFoundException() throws Excepti expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper); expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)); - expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes(); - expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes(); - // getComponents expect(clusters.getCluster("cluster1")).andReturn(cluster); expect(cluster.getService("service1")).andReturn(service); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java index 158ab7df4fc..0485396e512 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java @@ -64,7 +64,6 @@ import org.apache.ambari.server.state.Config; import org.apache.ambari.server.state.ConfigHelper; import org.apache.ambari.server.state.Host; -import org.apache.ambari.server.state.Service; import org.apache.ambari.server.state.configgroup.ConfigGroup; import org.apache.ambari.server.state.configgroup.ConfigGroupFactory; import org.easymock.Capture; @@ -83,8 +82,6 @@ public class ConfigGroupResourceProviderTest { - public static final String SERVICE_GROUP_NAME = "default"; - public static final String SERVICE_NAME = "ZOOKEEPER"; private HostDAO hostDAO = null; @Before @@ -157,7 +154,6 @@ private void testCreateConfigGroup(Authentication authentication) throws Excepti HostEntity hostEntity2 = createMock(HostEntity.class); ConfigGroupFactory configGroupFactory = createNiceMock(ConfigGroupFactory.class); ConfigGroup configGroup = createNiceMock(ConfigGroup.class); - Service service = createNiceMock(Service.class); expect(managementController.getClusters()).andReturn(clusters).anyTimes(); expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes(); @@ -165,8 +161,6 @@ private void testCreateConfigGroup(Authentication authentication) throws Excepti expect(clusters.getHost("h2")).andReturn(h2); expect(cluster.getClusterName()).andReturn("Cluster100").anyTimes(); expect(cluster.isConfigTypeExists(anyString())).andReturn(true).anyTimes(); - expect(cluster.getService(SERVICE_GROUP_NAME, SERVICE_NAME)).andReturn(service); - expect(service.getServiceId()).andReturn(1L); expect(managementController.getConfigGroupFactory()).andReturn(configGroupFactory); expect(managementController.getAuthName()).andReturn("admin").anyTimes(); 
expect(hostDAO.findByName("h1")).andReturn(hostEntity1).atLeastOnce(); @@ -178,6 +172,7 @@ private void testCreateConfigGroup(Authentication authentication) throws Excepti Capture serviceName = newCapture(); Capture servcieId = newCapture(); Capture servcieGroupId = newCapture(); + Capture captureName = newCapture(); Capture captureDesc = newCapture(); Capture captureTag = newCapture(); Capture> captureConfigs = newCapture(); @@ -187,7 +182,7 @@ private void testCreateConfigGroup(Authentication authentication) throws Excepti capture(captureTag), capture(captureDesc), capture(captureConfigs), capture(captureHosts))).andReturn(configGroup); - replay(managementController, clusters, cluster, service, configGroupFactory, + replay(managementController, clusters, cluster, configGroupFactory, configGroup, response, hostDAO, hostEntity1, hostEntity2); ResourceProvider provider = getConfigGroupResourceProvider @@ -223,8 +218,6 @@ private void testCreateConfigGroup(Authentication authentication) throws Excepti hostSet); properties.put(ConfigGroupResourceProvider.CONFIGGROUP_CONFIGS_PROPERTY_ID, configSet); - properties.put(ConfigGroupResourceProvider.CONFIGGROUP_SERVICEGROUPNAME_PROPERTY_ID, SERVICE_GROUP_NAME); - properties.put(ConfigGroupResourceProvider.CONFIGGROUP_SERVICENAME_PROPERTY_ID, SERVICE_NAME); propertySet.add(properties); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java index 0ab9dfb612c..65d89c11d2f 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java @@ -44,7 +44,6 @@ import org.apache.ambari.server.api.util.TreeNodeImpl; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.spi.Resource; -import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.HostGroup; @@ -78,7 +77,7 @@ public void setupTest() throws Exception { AmbariMetaInfo metainfo = createNiceMock(AmbariMetaInfo.class); expect(controller.getAmbariMetaInfo()).andReturn(metainfo).anyTimes(); StackInfo stackInfo = createNiceMock(StackInfo.class); - expect(metainfo.getStack(new StackId("TEST", "1.0"))).andReturn(stackInfo); + expect(metainfo.getStack("TEST", "1.0")).andReturn(stackInfo); expect(stackInfo.getServices()).andReturn(Collections.emptySet()).anyTimes(); expect(stackInfo.getProperties()).andReturn(Collections.emptyList()).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java index 999de001cfd..f0337bb575f 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java @@ -112,7 +112,7 @@ private void testCreateResources(Authentication authentication) throws Exception AbstractControllerResourceProvider.init(resourceProviderFactory); - ServiceComponentHostRequest request = new ServiceComponentHostRequest("Cluster100", 
SERVICE_GROUP_NAME, "Service100", "Component100", "Component100", "Host100", null); + ServiceComponentHostRequest request = new ServiceComponentHostRequest("Cluster100", SERVICE_GROUP_NAME, "Service100", "Component100", "Host100", null); Set expectedRequests = Collections.singleton(request); expect(managementController.createHostComponents(eq(expectedRequests))).andReturn(null).once(); @@ -141,7 +141,6 @@ private void testCreateResources(Authentication authentication) throws Exception properties.put(HostComponentResourceProvider.HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, SERVICE_GROUP_NAME); properties.put(HostComponentResourceProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, "Service100"); properties.put(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component100"); - properties.put(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID, "Component100"); properties.put(HostComponentResourceProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "Host100"); propertySet.add(properties); @@ -184,17 +183,17 @@ private void testGetResources(Authentication authentication) throws Exception { String repositoryVersion2 = "0.2-1234"; allResponse.add(new ServiceComponentHostResponse( - 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100", "Component 100", "Host100", "Host100", + 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(), stackId2.getStackId(), repositoryVersion2, null)); allResponse.add(new ServiceComponentHostResponse( - 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component101", "Component101", "Component 101", "Host100", "Host100", + 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component101", "Component 101", "Host100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(), stackId2.getStackId(), repositoryVersion2, null)); allResponse.add(new ServiceComponentHostResponse( - 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component102", "Component 102", "Host100", "Host100", + 1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component 102", "Host100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(), stackId2.getStackId(), repositoryVersion2, null)); @@ -351,7 +350,7 @@ private void testUpdateResources(Authentication authentication) throws Exception Set nameResponse = new HashSet<>(); nameResponse.add(new ServiceComponentHostResponse( - 1L, "Cluster102", 1L, "ServiceGroup100", 1L, "Service100", "", 1L, "Component100", "Component100","Component 100", "Host100", "Host100", + 1L, "Cluster102", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "INSTALLED", "", "", "", "", null)); // set expectations @@ -379,7 +378,7 @@ private void testUpdateResources(Authentication authentication) throws Exception changedHosts.put("Component100", Collections.singletonMap(State.STARTED, changedComponentHosts)); expect(managementController.addStages(null, cluster, mapRequestProps, null, null, null, changedHosts, - Collections.emptyList(), false, false)).andReturn(stageContainer).anyTimes(); + Collections.emptyList(), false, false)).andReturn(stageContainer).once(); stageContainer.persist(); expect(stageContainer.getRequestStatusResponse()).andReturn(response).once(); @@ -412,9 +411,7 @@ private void testUpdateResources(Authentication authentication) 
throws Exception Predicate predicate = new PredicateBuilder().property( HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").and(). property(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID).equals("INSTALLED").and(). - property(HostComponentResourceProvider.HOST_COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID).equals("ServiceGroup100").and(). - property(HostComponentResourceProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("Service100").and(). - property(HostComponentResourceProvider.HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID).equals(100L).toPredicate(); + property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100").toPredicate(); RequestStatus requestStatus = provider.updateResources(request, predicate); Resource responseResource = requestStatus.getRequestResource(); assertEquals("response msg", responseResource.getPropertyValue(PropertyHelper.getPropertyId("Requests", "message"))); @@ -453,7 +450,7 @@ private void testDeleteResources(Authentication authentication) throws Exception new HostComponentResourceProvider(managementController, injector); // set expectations - ServiceComponentHostRequest request = new ServiceComponentHostRequest(null, null, null, 1L, "Component100", "Component100", "Host100", null); + ServiceComponentHostRequest request = new ServiceComponentHostRequest(null, null, null, "Component100", "Host100", null); expect(managementController.deleteHostComponents(Collections.singleton(request))).andReturn(deleteStatusMetaData); // replay @@ -466,9 +463,7 @@ private void testDeleteResources(Authentication authentication) throws Exception provider.addObserver(observer); Predicate predicate = new PredicateBuilder(). - property(HostComponentResourceProvider.HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID).equals(1L).and(). property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100").and(). - property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID).equals("Component100").and(). property(HostComponentResourceProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID).equals("Host100").toPredicate(); provider.deleteResources(new RequestImpl(null, null, null, null), predicate); @@ -540,7 +535,7 @@ public void testUpdateResourcesNothingToUpdate() throws Exception { Set nameResponse = new HashSet<>(); nameResponse.add(new ServiceComponentHostResponse( - 1L, "Cluster102", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100", "Component 100", "Host100", "Host100", + 1L, "Cluster102", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "INSTALLED", "", "", "", "", null)); // set expectations @@ -593,7 +588,7 @@ public void testUpdateResourcesNothingToUpdate() throws Exception { Predicate predicate = new PredicateBuilder().property( HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").and(). property(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID).equals("INSTALLED").and(). 
- property(HostComponentResourceProvider.HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID).equals(100L).toPredicate(); + property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100").toPredicate(); try { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java index 1334e260d3c..e68ff2eb03d 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java @@ -314,11 +314,11 @@ public void testGetResources_Status_NoCluster() throws Exception { Set clusterSet = new HashSet<>(); clusterSet.add(cluster); - ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100","Component 100", + ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L,"Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component102", "Component 102", + ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L,"Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component 102", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L,"Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component103", "Component 103", + ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L,"Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component 103", "Host100", "Host100", "STARTED", "", null, null, null, null); Set responses = new HashSet<>(); @@ -409,11 +409,11 @@ public void testGetResources_Status_Healthy() throws Exception { Set clusterSet = new HashSet<>(); clusterSet.add(cluster); - ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100", "Component 100", + ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component102", "Component 102", + ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component 102", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component103", "Component 103", + ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component 103", "Host100", "Host100", "STARTED", "", null, null, null, null); Set responses = new HashSet<>(); @@ -501,11 +501,11 @@ public void testGetResources_Status_Unhealthy() throws Exception { Set clusterSet = new HashSet<>(); 
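With HOST_COMPONENT_HOST_COMPONENT_ID_PROPERTY_ID and HOST_COMPONENT_COMPONENT_TYPE_PROPERTY_ID removed from the predicates in the HostComponentResourceProviderTest hunks above, a host component is now addressed by cluster, state, and component name alone. A minimal sketch of the rewritten predicate, assembled from the plus-side lines of this diff:

    import org.apache.ambari.server.controller.internal.HostComponentResourceProvider;
    import org.apache.ambari.server.controller.spi.Predicate;
    import org.apache.ambari.server.controller.utilities.PredicateBuilder;

    // Select "Component100" in "Cluster102" while it is INSTALLED; the
    // numeric host-component id is no longer part of the lookup.
    Predicate predicate = new PredicateBuilder()
        .property(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").and()
        .property(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID).equals("INSTALLED").and()
        .property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100")
        .toPredicate();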
clusterSet.add(cluster); - ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100","Component 100", + ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component102", "Component 102", + ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component 102", "Host100", "Host100", "INSTALLED", "", null, null, null, null); - ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component103", "Component 103", + ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component 103", "Host100", "Host100", "STARTED", "", null, null, null, null); Set responses = new HashSet<>(); @@ -688,7 +688,7 @@ private void testGetRecoveryReport(Authentication authentication) throws Excepti Set clusterSet = new HashSet<>(); clusterSet.add(cluster); - ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100", "Component 100", + ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "STARTED", "", null, null, null, null); Set responses = new HashSet<>(); @@ -772,11 +772,11 @@ public void testGetResources_Status_Alert() throws Exception { Set clusterSet = new HashSet<>(); clusterSet.add(cluster); - ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component100", "Component 100", + ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component100", "Component 100", "Host100", "Host100", "STARTED", "", null, null, null, null); - ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component102", "Component 102", + ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component102", "Component 102", "Host100", "Host100", "INSTALLED", "", null, null, null, null); - ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component103", "Component 103", + ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse(1L, "Cluster100", 1L, "", 1L, "Service100", "", 1L, "Component103", "Component 103", "Host100", "Host100", "STARTED", "", null, null, null, null); Set responses = new HashSet<>(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java index 25317ce282e..9eac5ec5d71 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java +++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java @@ -123,14 +123,14 @@ private void createService(String clusterName, String serviceGroupName, String s private void createServiceComponent(String clusterName, String serviceGroupName, String serviceName, String componentName, State desiredState) throws AmbariException, AuthorizationException { - ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, componentName, desiredState != null ? desiredState.name() : null); + ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, serviceName, componentName, desiredState != null ? desiredState.name() : null); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } private void createServiceComponentHost(String clusterName, String serviceGroupName, String serviceName, String componentName, String hostname, State desiredState) throws AmbariException, AuthorizationException { - ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, componentName, hostname, desiredState != null ? desiredState.name() : null); + ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, serviceName, componentName, hostname, desiredState != null ? desiredState.name() : null); controller.createHostComponents(Collections.singleton(r)); } @@ -141,7 +141,7 @@ private void createHDFSServiceConfigs(boolean version1) throws AmbariException, Cluster cluster = clusters.getCluster(clusterName); cluster.setDesiredStackVersion(new StackId("HDP-0.1")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, r.getStackVersion()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); String serviceName = "HDFS"; createService(clusterName, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; @@ -207,7 +207,7 @@ private void createConfigs() throws AmbariException, AuthorizationException { Cluster cluster = clusters.getCluster(clusterName); cluster.setDesiredStackVersion(new StackId("HDP-2.0.6")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, r.getStackVersion()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); String serviceName = "HDFS"; String serviceName2 = "YARN"; String serviceName3 = "MAPREDUCE2"; @@ -327,7 +327,7 @@ private void createConfigsNameNodeHa() throws AmbariException, AuthorizationExce Cluster cluster = clusters.getCluster(clusterName); cluster.setDesiredStackVersion(new StackId("HDP-2.0.6")); String serviceGroupName = "CORE"; - ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName, r.getStackVersion()); + ServiceGroupResourceProviderTest.createServiceGroup(controller, clusterName, serviceGroupName); String serviceName = "HDFS"; createService(clusterName, serviceGroupName, serviceName, null); String componentName1 = "NAMENODE"; diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/MpackResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/MpackResourceProviderTest.java index cc92b6ba348..bb1dbec4ef3 100644 --- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/MpackResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/MpackResourceProviderTest.java @@ -111,7 +111,9 @@ public void testGetResourcesMpacks() throws Exception { // replay replay(m_dao); - ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(type, m_amc); + ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( + type + ); // create the request Request request = PropertyHelper.getReadRequest(); @@ -180,7 +182,8 @@ public void testGetResourcesMpackId() throws Exception { // replay replay(m_dao,m_amc); - ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(type, m_amc); + ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( + type); // create the request Request request = PropertyHelper.getReadRequest(); @@ -219,7 +222,8 @@ public void testCreateResources() throws Exception { replay(m_amc,request); // end expectations - MpackResourceProvider provider = (MpackResourceProvider) AbstractControllerResourceProvider.getResourceProvider(Resource.Type.Mpack, m_amc); + MpackResourceProvider provider = (MpackResourceProvider) AbstractControllerResourceProvider.getResourceProvider( + Resource.Type.Mpack); AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); ((ObservableResourceProvider)provider).addObserver(observer); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java index 3cc48905829..5ed582f5c3b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java @@ -109,7 +109,7 @@ public void testHostNameSpecified() throws Exception { replay(hostResourceProvider); Map properties = createBlueprintRequestPropertiesNameOnly(CLUSTER_NAME, BLUEPRINT_NAME); - ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(properties, null); assertEquals(CLUSTER_NAME, provisionClusterRequest.getClusterName()); assertEquals(TopologyRequest.Type.PROVISION, provisionClusterRequest.getType()); @@ -160,7 +160,7 @@ public void testHostCountSpecified() throws Exception { replay(hostResourceProvider); Map properties = createBlueprintRequestPropertiesCountOnly(CLUSTER_NAME, BLUEPRINT_NAME); - ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(properties, null); assertEquals(CLUSTER_NAME, provisionClusterRequest.getClusterName()); assertEquals(TopologyRequest.Type.PROVISION, provisionClusterRequest.getType()); @@ -211,7 +211,7 @@ public void testHostCountSpecified() throws Exception { @Test public void testMultipleGroups() throws Exception { Map properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME); - ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(properties, null); 
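Every ProvisionClusterRequest construction in this test drops the leading argument, leaving a two-argument form. A minimal usage sketch built from the hunks above; createBlueprintRequestProperties and the constants are the test's own helpers, and the second argument (a security configuration in the Ambari codebase, stated here as an assumption) is passed as null exactly as in these tests:

    Map<String, Object> properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME);
    ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null);
    assertEquals(CLUSTER_NAME, request.getClusterName());
    assertEquals(TopologyRequest.Type.PROVISION, request.getType());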
assertEquals(CLUSTER_NAME, provisionClusterRequest.getClusterName()); assertEquals(TopologyRequest.Type.PROVISION, provisionClusterRequest.getType()); @@ -286,7 +286,7 @@ public void test_NoHostGroupInfo() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @Test @@ -301,7 +301,7 @@ public void test_Creditentials() throws Exception { credentialsSet.add(credentialHashMap); properties.put("credentials", credentialsSet); - ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(properties, null); assertEquals(provisionClusterRequest.getCredentialsMap().get("testAlias").getAlias(), "testAlias"); assertEquals(provisionClusterRequest.getCredentialsMap().get("testAlias").getPrincipal(), "testPrincipal"); @@ -326,7 +326,7 @@ public void test_CreditentialsInvalidType() throws Exception { credentialsSet.add(credentialHashMap); properties.put("credentials", credentialsSet); - ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest provisionClusterRequest = new ProvisionClusterRequest(properties, null); } @Test(expected= InvalidTopologyTemplateException.class) @@ -338,7 +338,7 @@ public void test_GroupInfoMissingName() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @Test(expected= InvalidTopologyTemplateException.class) @@ -350,7 +350,7 @@ public void test_NoHostsInfo() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @Test(expected = InvalidTopologyTemplateException.class) @@ -370,7 +370,7 @@ public void test_NoHostNameOrHostCount() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @@ -383,7 +383,7 @@ public void testInvalidPredicateProperty() throws Exception { replay(hostResourceProvider); // should result in an exception due to invalid property in host predicate - new ProvisionClusterRequest(null, createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME), null); + new ProvisionClusterRequest(createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME), null); } @Test(expected = InvalidTopologyTemplateException.class) @@ -395,7 +395,7 @@ public void testHostNameAndCountSpecified() throws Exception { Map properties = createBlueprintRequestPropertiesNameOnly(CLUSTER_NAME, BLUEPRINT_NAME); ((Map) ((List) properties.get("host_groups")).iterator().next()).put("host_count", "5"); // should result in an exception due to both host name and host count being specified - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @Test(expected = InvalidTopologyTemplateException.class) @@ -407,13 +407,13 @@ public void testHostNameAndPredicateSpecified() throws Exception { Map properties = createBlueprintRequestPropertiesNameOnly(CLUSTER_NAME, BLUEPRINT_NAME); ((Map) ((List) 
properties.get("host_groups")).iterator().next()).put("host_predicate", "Hosts/host_name=myTestHost"); // should result in an exception due to both host name and host count being specified - new ProvisionClusterRequest(null, properties, null); + new ProvisionClusterRequest(properties, null); } @Test public void testQuickLinksProfile_NoDataInRequest() throws Exception { Map properties = createBlueprintRequestProperties(CLUSTER_NAME, BLUEPRINT_NAME); - ProvisionClusterRequest request = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null); assertNull("No quick links profile is expected", request.getQuickLinksProfileJson()); } @@ -424,7 +424,7 @@ public void testQuickLinksProfile_OnlyGlobalFilterDataInRequest() throws Excepti properties.put(ProvisionClusterRequest.QUICKLINKS_PROFILE_FILTERS_PROPERTY, Sets.newHashSet(QuickLinksProfileBuilderTest.filter(null, null, true))); - ProvisionClusterRequest request = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null); assertEquals("Quick links profile doesn't match expected", "{\"filters\":[{\"visible\":true}],\"services\":[]}", request.getQuickLinksProfileJson()); @@ -439,7 +439,7 @@ public void testQuickLinksProfile_OnlyServiceFilterDataInRequest() throws Except Set> services = Sets.newHashSet(hdfs); properties.put(ProvisionClusterRequest.QUICKLINKS_PROFILE_SERVICES_PROPERTY, services); - ProvisionClusterRequest request = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null); assertEquals("Quick links profile doesn't match expected", "{\"filters\":[],\"services\":[{\"name\":\"HDFS\",\"components\":[],\"filters\":[{\"visible\":true}]}]}", request.getQuickLinksProfileJson()); @@ -457,7 +457,7 @@ public void testQuickLinksProfile_BothGlobalAndServiceLevelFilters() throws Exce Set> services = Sets.newHashSet(hdfs); properties.put(ProvisionClusterRequest.QUICKLINKS_PROFILE_SERVICES_PROPERTY, services); - ProvisionClusterRequest request = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null); System.out.println(request.getQuickLinksProfileJson()); assertEquals("Quick links profile doesn't match expected", "{\"filters\":[{\"visible\":true}],\"services\":[{\"name\":\"HDFS\",\"components\":[],\"filters\":[{\"visible\":true}]}]}", @@ -470,7 +470,7 @@ public void testQuickLinksProfile_InvalidRequestData() throws Exception { properties.put(ProvisionClusterRequest.QUICKLINKS_PROFILE_SERVICES_PROPERTY, "Hello World!"); - ProvisionClusterRequest request = new ProvisionClusterRequest(null, properties, null); + ProvisionClusterRequest request = new ProvisionClusterRequest(properties, null); } public static Map createBlueprintRequestProperties(String clusterName, String blueprintName) { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java index cd5db3db23d..49cd8e1a04c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java @@ -214,7 +214,7 @@ public void testValidPropertyIds() { Assert.assertTrue(validPropertyIds.contains("Versions/stack_version")); 
Assert.assertTrue(validPropertyIds.contains("Versions/min_upgrade_version")); - request = PropertyHelper.getReadRequest(OperatingSystemReadOnlyResourceProvider.propertyIds); + request = PropertyHelper.getReadRequest(OperatingSystemResourceProvider.propertyIds); validPropertyIds = request.getPropertyIds(); //OperatingSystem resource properties diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java index 440b2e81275..d8cb701da5c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java @@ -85,7 +85,6 @@ import org.apache.ambari.server.topology.HostGroupInfo; import org.apache.ambari.server.topology.HostRequest; import org.apache.ambari.server.topology.LogicalRequest; -import org.apache.ambari.server.topology.Setting; import org.apache.ambari.server.topology.TopologyManager; import org.apache.ambari.server.topology.TopologyRequest; import org.apache.ambari.server.utils.SecretReference; @@ -1685,9 +1684,7 @@ public void testGetLogicalRequestStatusWithNoTasks() throws Exception { expect(topologyRequest.getHostGroupInfo()).andReturn(hostGroupInfoMap).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); - Setting setting = createNiceMock(Setting.class); - expect(blueprint.getSetting()).andReturn(setting).anyTimes(); - expect(setting.shouldSkipFailure()).andReturn(true).anyTimes(); + expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes(); expect(logicalRequest.getHostRequests()).andReturn(hostRequests).anyTimes(); expect(logicalRequest.constructNewPersistenceEntity()).andReturn(requestMock).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java index cc02db4e5c0..b9f32a03f67 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java @@ -112,7 +112,7 @@ private void addSingleHostByName(Map props) throws InvalidTopolo reset(hostResourceProvider); replay(hostResourceProvider); - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", Collections.singleton(props)); + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(Collections.singleton(props)); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); assertEquals(String.format("Scale Cluster '%s' (+%s hosts)", CLUSTER_NAME, "1"), @@ -150,7 +150,7 @@ private void addMultipleHostsByName(Set> propertySet) throws reset(hostResourceProvider); replay(hostResourceProvider); - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", propertySet); + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(propertySet); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); assertEquals(String.format("Scale Cluster '%s' (+%s hosts)", CLUSTER_NAME, "2"), @@ -177,7 +177,7 @@ public void test_basic_hostCount() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", 
Collections.singleton( + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(Collections.singleton( createScaleClusterPropertiesGroup1_HostCount(CLUSTER_NAME, BLUEPRINT_NAME))); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); @@ -203,7 +203,7 @@ public void test_basic_hostCount2() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", Collections.singleton( + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(Collections.singleton( createScaleClusterPropertiesGroup1_HostCount2(CLUSTER_NAME, BLUEPRINT_NAME))); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); @@ -225,7 +225,7 @@ public void test_basic_hostCount2() throws Exception { @Test public void test_basic_hostCountAndPredicate() throws Exception { - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", Collections.singleton( + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(Collections.singleton( createScaleClusterPropertiesGroup1_HostCountAndPredicate(CLUSTER_NAME, BLUEPRINT_NAME))); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); @@ -252,7 +252,7 @@ public void testMultipleHostGroups() throws Exception { propertySet.add(createScaleClusterPropertiesGroup1_HostCount(CLUSTER_NAME, BLUEPRINT_NAME)); propertySet.add(createScaleClusterPropertiesGroup1_HostName(CLUSTER_NAME, BLUEPRINT_NAME)); - ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest("{}", propertySet); + ScaleClusterRequest scaleClusterRequest = new ScaleClusterRequest(propertySet); assertEquals(TopologyRequest.Type.SCALE, scaleClusterRequest.getType()); assertEquals(String.format("Scale Cluster '%s' (+%s hosts)", CLUSTER_NAME, "3"), @@ -300,7 +300,7 @@ public void test_GroupInfoMissingName() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception - new ScaleClusterRequest("{}", Collections.singleton(properties)); + new ScaleClusterRequest(Collections.singleton(properties)); } @Test(expected = InvalidTopologyTemplateException.class) @@ -313,7 +313,7 @@ public void test_NoHostNameOrHostCount() throws Exception { reset(hostResourceProvider); replay(hostResourceProvider); // should result in an exception because neither host name or host count are specified - new ScaleClusterRequest("{}", Collections.singleton(properties)); + new ScaleClusterRequest(Collections.singleton(properties)); } @@ -326,7 +326,7 @@ public void testInvalidPredicateProperty() throws Exception { replay(hostResourceProvider); // should result in an exception due to invalid property in host predicate - new ScaleClusterRequest("{}", Collections.singleton( + new ScaleClusterRequest(Collections.singleton( createScaleClusterPropertiesGroup1_HostCountAndPredicate(CLUSTER_NAME, BLUEPRINT_NAME))); } @@ -340,7 +340,7 @@ public void testMultipleBlueprints() throws Exception { propertySet.add(createScaleClusterPropertiesGroup1_HostName2(CLUSTER_NAME, "OTHER_BLUEPRINT")); // should result in an exception due to different blueprints being specified - new ScaleClusterRequest("{}", propertySet); + new ScaleClusterRequest(propertySet); } public static Map createScaleClusterPropertiesGroup1_HostName(String clusterName, String blueprintName) { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceDependencyResourceProviderTest.java 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceDependencyResourceProviderTest.java index dbf3caeb415..f4d4d2d0dbb 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceDependencyResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceDependencyResourceProviderTest.java @@ -256,7 +256,7 @@ private void createServiceComponent(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, dStateStr); + serviceName, componentName, dStateStr); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } @@ -269,7 +269,7 @@ private void createServiceComponentHost(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, hostname, dStateStr); + serviceName, componentName, hostname, dStateStr); controller.createHostComponents(Collections.singleton(r)); } @@ -341,17 +341,16 @@ private Cluster createDefaultCluster(String clusterName) throws Exception { final String host2 = "b" + getUniqueName(); final String host3 = "c" + getUniqueName(); - String stackId = "HDP-2.0.6"; - setupClusterWithHosts(clusterName, stackId, Arrays.asList(host1, host2, host3), "centos6"); + setupClusterWithHosts(clusterName, "HDP-2.0.6", Arrays.asList(host1, host2, host3), "centos6"); Cluster cluster = clusters.getCluster(clusterName); - cluster.setDesiredStackVersion(new StackId(stackId)); - cluster.setCurrentStackVersion(new StackId(stackId)); + cluster.setDesiredStackVersion(new StackId("HDP-2.0.6")); + cluster.setCurrentStackVersion(new StackId("HDP-2.0.6")); RepositoryVersionEntity repositoryVersion = repositoryVersion206; - ServiceGroup serviceGroupCore = cluster.addServiceGroup(SERVICE_GROUP_NAME_CORE, stackId); - ServiceGroup serviceGroupTest = cluster.addServiceGroup(SERVICE_GROUP_NAME_TEST, stackId); + ServiceGroup serviceGroupCore = cluster.addServiceGroup(SERVICE_GROUP_NAME_CORE, "HDP-1.0"); + ServiceGroup serviceGroupTest = cluster.addServiceGroup(SERVICE_GROUP_NAME_TEST, "HDP-1.0"); Service hdfs = cluster.addService(serviceGroupCore, SERVICE_NAME_HDFS, SERVICE_NAME_HDFS, repositoryVersion); Service yarn = cluster.addService(serviceGroupCore, SERVICE_NAME_YARN, SERVICE_NAME_YARN, repositoryVersion); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupDependencyResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupDependencyResourceProviderTest.java index f0e98cafc6e..9f1910e9f4f 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupDependencyResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupDependencyResourceProviderTest.java @@ -251,7 +251,7 @@ private void createServiceComponent(String clusterName, dStateStr = desiredState.toString(); } ServiceComponentRequest r = new ServiceComponentRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, dStateStr); + serviceName, componentName, dStateStr); ComponentResourceProviderTest.createComponents(controller, Collections.singleton(r)); } @@ -264,7 +264,7 @@ private void createServiceComponentHost(String 
clusterName, dStateStr = desiredState.toString(); } ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceGroupName, - serviceName, componentName, componentName, hostname, dStateStr); + serviceName, componentName, hostname, dStateStr); controller.createHostComponents(Collections.singleton(r)); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupResourceProviderTest.java index 7db5cc140ac..d590b4c3c19 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceGroupResourceProviderTest.java @@ -50,7 +50,6 @@ import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.ServiceGroup; -import org.apache.ambari.server.state.StackId; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -60,8 +59,6 @@ public class ServiceGroupResourceProviderTest { - private static final String STACK_ID = "HDP-1.2.3"; - public static ServiceGroupResourceProvider getProvider(AmbariManagementController controller) { return new ServiceGroupResourceProvider(controller); } @@ -71,9 +68,9 @@ public static void createServiceGroups(AmbariManagementController controller, Se getProvider(controller).createServiceGroups(requests); } - public static void createServiceGroup(AmbariManagementController controller, String clusterName, String serviceGroupName, String version) + public static void createServiceGroup(AmbariManagementController controller, String clusterName, String serviceGroupName) throws AmbariException, AuthorizationException { - ServiceGroupRequest request = new ServiceGroupRequest(clusterName, serviceGroupName, version); + ServiceGroupRequest request = new ServiceGroupRequest(clusterName, serviceGroupName, "dummy-stack-name"); createServiceGroups(controller, Collections.singleton(request)); } @@ -111,12 +108,11 @@ public void testCreateServiceGroupsWithStackId() throws Exception { ClusterController clusterController = createNiceMock(ClusterController.class); ServiceGroup coreServiceGroup = createNiceMock(ServiceGroup.class); ServiceGroup edmServiceGroup = createNiceMock(ServiceGroup.class); - ServiceGroupResponse coreServiceGroupResponse = new ServiceGroupResponse(1l, "c1", 1l, "CORE", STACK_ID); + ServiceGroupResponse coreServiceGroupResponse = new ServiceGroupResponse(1l, "c1", 1l, "CORE", "HDP-1.2.3"); expect(ambariManagementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes(); expect(ambariManagementController.getClusters()).andReturn(clusters).anyTimes(); expect(clusters.getCluster(clusterName)).andReturn(cluster).anyTimes(); - expect(cluster.getCurrentStackVersion()).andReturn(new StackId(STACK_ID)).anyTimes(); - expect(cluster.addServiceGroup("CORE", STACK_ID)).andReturn(coreServiceGroup).anyTimes(); + expect(cluster.addServiceGroup("CORE", "HDP-1.2.3")).andReturn(coreServiceGroup).anyTimes(); expect(coreServiceGroup.convertToResponse()).andReturn(coreServiceGroupResponse).anyTimes(); expect(clusterController.getSchema(Resource.Type.ServiceGroup)).andReturn(serviceGroupSchema).anyTimes(); expect(serviceGroupSchema.getKeyPropertyId(Resource.Type.Cluster)).andReturn("ServiceGroupInfo/cluster_name").anyTimes(); diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java index fe26a82fd03..c9464061c9a 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProviderTest.java @@ -142,22 +142,22 @@ public void setup() throws Exception { RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service service = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); - service.addServiceComponent("NAMENODE", "NAMENODE"); - service.addServiceComponent("DATANODE", "DATANODE"); - service.addServiceComponent("JOURNALNODE", "JOURNALNODE"); + service.addServiceComponent("NAMENODE"); + service.addServiceComponent("DATANODE"); + service.addServiceComponent("JOURNALNODE"); service = cluster.addService(serviceGroup, "YARN", "YARN", repositoryVersion); - service.addServiceComponent("RESOURCEMANAGER", "RESOURCEMANAGER"); + service.addServiceComponent("RESOURCEMANAGER"); service = cluster.addService(serviceGroup, "HBASE", "HBASE", repositoryVersion); - service.addServiceComponent("HBASE_MASTER", "HBASE_MASTER"); - service.addServiceComponent("HBASE_REGIONSERVER", "HBASE_REGIONSERVER"); + service.addServiceComponent("HBASE_MASTER"); + service.addServiceComponent("HBASE_REGIONSERVER"); stackId = new StackId("HDP-2.1.1"); repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); service = cluster.addService(serviceGroup, "STORM", "STORM", repositoryVersion); - service.addServiceComponent("STORM_REST_API", "STORM_REST_API"); + service.addServiceComponent("STORM_REST_API"); clusters.addHost("h1"); Host host = clusters.getHost("h1"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java index a08691862af..a7d0d479d2e 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java @@ -106,7 +106,7 @@ private void stackUnitIs(String name, String unit) { private String updateUnit(String serviceName, String configType, String propName, String propValue) throws InvalidTopologyException, ConfigurationTopologyException { UnitUpdater updater = new UnitUpdater(serviceName, configType); expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(serviceName, configType)).andReturn(stackConfigWithMetadata).anyTimes(); replayAll(); return updater.updateForClusterCreate(propName, propValue, Collections.emptyMap(), clusterTopology); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java index 549e1c181de..473a7dd1762 100644 --- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java @@ -177,11 +177,11 @@ public void createCluster() throws Exception { ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service service = cluster.addService(serviceGroup, "ZOOKEEPER", "ZOOKEEPER", repoVersionEntity); - ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER", "ZOOKEEPER_SERVER"); + ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER"); ServiceComponentHost sch = component.addServiceComponentHost("h1"); sch.setVersion("2.2.0.0"); - component = service.addServiceComponent("ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT"); + component = service.addServiceComponent("ZOOKEEPER_CLIENT"); sch = component.addServiceComponentHost("h1"); sch.setVersion("2.2.0.0"); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProviderTest.java index 235e0f02284..0c9805c6207 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProviderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/VersionDefinitionResourceProviderTest.java @@ -698,7 +698,7 @@ private void makeService(String serviceName, RepositoryVersionEntity serviceRepo } catch (AmbariException e) { clusters.addCluster(clusterName, parentEntity.getStackId()); cluster = clusters.getCluster(clusterName); - serviceGroup = cluster.addServiceGroup(serviceGroupName, cluster.getDesiredStackVersion().getStackId()); + serviceGroup = cluster.addServiceGroup(serviceGroupName, "HDP-1.0"); } cluster.addService(serviceGroup, serviceName, serviceName, serviceRepo); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java index 14db57aa1d0..9b998e506b5 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/DefaultServiceCalculatedStateTest.java @@ -43,8 +43,8 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception{ - ServiceComponent masterComponent = service.addServiceComponent("ZOOKEEPER_SERVER", "ZOOKEEPER_SERVER"); - ServiceComponent clientComponent = service.addServiceComponent("ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("ZOOKEEPER_SERVER"); + ServiceComponent clientComponent = service.addServiceComponent("ZOOKEEPER_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java index 35ec2d2fc15..f88a4426cc7 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java +++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/FlumeServiceCalculatedStateTest.java @@ -41,7 +41,7 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("FLUME_HANDLER", "FLUME_HANDLER"); + ServiceComponent masterComponent = service.addServiceComponent("FLUME_HANDLER"); for (String hostName: hosts){ diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java index 5b972209385..be814e3c53b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HBaseServiceCalculatedStateTest.java @@ -41,9 +41,9 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("HBASE_MASTER", "HBASE_MASTER"); - ServiceComponent secondMasterComponent = service.addServiceComponent("HBASE_REGIONSERVER", "HBASE_REGIONSERVER"); - ServiceComponent clientComponent = service.addServiceComponent("HBASE_CLIENT", "HBASE_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("HBASE_MASTER"); + ServiceComponent secondMasterComponent = service.addServiceComponent("HBASE_REGIONSERVER"); + ServiceComponent clientComponent = service.addServiceComponent("HBASE_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java index 6c317cd190d..cfae64fc8e9 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HDFSServiceCalculatedStateTest.java @@ -42,9 +42,9 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("NAMENODE", "NAMENODE"); - ServiceComponent masterComponent1 = service.addServiceComponent("SECONDARY_NAMENODE", "SECONDARY_NAMENODE"); - ServiceComponent clientComponent = service.addServiceComponent("HDFS_CLIENT", "HDFS_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("NAMENODE"); + ServiceComponent masterComponent1 = service.addServiceComponent("SECONDARY_NAMENODE"); + ServiceComponent clientComponent = service.addServiceComponent("HDFS_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java index fb598d60816..511c165c124 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java +++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/HiveServiceCalculatedStateTest.java @@ -42,11 +42,11 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("HIVE_METASTORE", "HIVE_METASTORE"); - ServiceComponent secondMasterComponent = service.addServiceComponent("HIVE_SERVER", "HIVE_SERVER"); - ServiceComponent thirdMasterComponent = service.addServiceComponent("WEBHCAT_SERVER", "WEBHCAT_SERVER"); - ServiceComponent fourMasterComponent = service.addServiceComponent("MYSQL_SERVER", "MYSQL_SERVER"); - ServiceComponent clientComponent = service.addServiceComponent("HIVE_CLIENT", "HIVE_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("HIVE_METASTORE"); + ServiceComponent secondMasterComponent = service.addServiceComponent("HIVE_SERVER"); + ServiceComponent thirdMasterComponent = service.addServiceComponent("WEBHCAT_SERVER"); + ServiceComponent fourMasterComponent = service.addServiceComponent("MYSQL_SERVER"); + ServiceComponent clientComponent = service.addServiceComponent("HIVE_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java index 6e3a1c8aa32..3d959ea0c69 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/OozieServiceCalculatedStateTest.java @@ -41,8 +41,8 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("OOZIE_SERVER", "OOZIE_SERVER"); - ServiceComponent clientComponent = service.addServiceComponent("OOZIE_CLIENT", "OOZIE_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("OOZIE_SERVER"); + ServiceComponent clientComponent = service.addServiceComponent("OOZIE_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java index cf23f319555..2de3ea6b3d6 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/YarnServiceCalculatedStateTest.java @@ -42,9 +42,9 @@ protected ServiceCalculatedState getServiceCalculatedStateObject() { @Override protected void createComponentsAndHosts() throws Exception { - ServiceComponent masterComponent = service.addServiceComponent("RESOURCEMANAGER", "RESOURCEMANAGER"); - ServiceComponent secondMasterComponent = service.addServiceComponent("NODEMANAGER", "NODEMANAGER"); - ServiceComponent clientComponent = service.addServiceComponent("YARN_CLIENT", "YARN_CLIENT"); + ServiceComponent masterComponent = service.addServiceComponent("RESOURCEMANAGER"); + ServiceComponent secondMasterComponent = service.addServiceComponent("NODEMANAGER"); + 
ServiceComponent clientComponent = service.addServiceComponent("YARN_CLIENT"); for (String hostName: hosts){ clusters.addHost(hostName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java index f939ce07914..a312f8efbde 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java @@ -366,7 +366,7 @@ private void installHdfsService() throws Exception { Service service = m_cluster.getService(serviceName); Assert.assertNotNull(service); - ServiceComponent component = m_componentFactory.createNew(service, "DATANODE", "DATANODE"); + ServiceComponent component = m_componentFactory.createNew(service, "DATANODE"); service.addServiceComponent(component); component.setDesiredState(State.INSTALLED); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java index 7a180dce9a4..4f05d9ba752 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java @@ -151,7 +151,7 @@ private RepositoryVersionEntity createClusterAndHosts(String INSTALLED_VERSION, Map> zkTopology = new HashMap<>(); List zkServerHosts = Arrays.asList(0, 1, 2); zkTopology.put("ZOOKEEPER_SERVER", new ArrayList<>(zkServerHosts)); - ServiceGroup serviceGroup = c1.addServiceGroup("CORE", this.stackId); + ServiceGroup serviceGroup = c1.addServiceGroup("CORE", "HDP-1.0"); addService(c1, serviceGroup, hostList, zkTopology, "ZOOKEEPER", repositoryVersionEntity); // install new version @@ -477,7 +477,7 @@ public void testComponentHostVersionNotRequired() throws Exception { .put("NAMENODE", Lists.newArrayList(0)) .put("DATANODE", Lists.newArrayList(1)) .build(); - ServiceGroup serviceGroup = c1.addServiceGroup("CORE", this.stackId); + ServiceGroup serviceGroup = c1.addServiceGroup("CORE", "HDP-1.0"); addService(c1, serviceGroup, allHosts, topology, "HDFS", repo); topology = new ImmutableMap.Builder>() @@ -558,7 +558,7 @@ private void addService(Cluster cl, ServiceGroup serviceGroup, List host for (Map.Entry> component : topology.entrySet()) { String componentName = component.getKey(); - cl.getService(serviceName).addServiceComponent(componentName, componentName); + cl.getService(serviceName).addServiceComponent(componentName); for (Integer hostIndex : component.getValue()) { cl.getService(serviceName) @@ -580,7 +580,7 @@ private void addServiceComponent(Cluster cl, List hostList, Service service = cl.getService(serviceName); if (!service.getServiceComponents().containsKey(componentName)) { - service.addServiceComponent(componentName, componentName); + service.addServiceComponent(componentName); } ServiceComponent component = service.getServiceComponent(componentName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java index e996c3ac650..3be8333122b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java @@ -49,7 +49,6 @@ import 
org.apache.ambari.server.orm.dao.HostDAO; import org.apache.ambari.server.orm.dao.HostRoleCommandDAO; import org.apache.ambari.server.orm.dao.HostVersionDAO; -import org.apache.ambari.server.orm.dao.MpackDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.dao.RequestDAO; import org.apache.ambari.server.orm.dao.ResourceTypeDAO; @@ -66,7 +65,6 @@ import org.apache.ambari.server.orm.entities.HostRoleCommandEntity; import org.apache.ambari.server.orm.entities.HostStateEntity; import org.apache.ambari.server.orm.entities.HostVersionEntity; -import org.apache.ambari.server.orm.entities.MpackEntity; import org.apache.ambari.server.orm.entities.PrincipalEntity; import org.apache.ambari.server.orm.entities.PrincipalTypeEntity; import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; @@ -85,9 +83,6 @@ import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Host; import org.apache.ambari.server.state.HostState; -import org.apache.ambari.server.state.Module; -import org.apache.ambari.server.state.ModuleComponent; -import org.apache.ambari.server.state.Mpack; import org.apache.ambari.server.state.RepositoryVersionState; import org.apache.ambari.server.state.Service; import org.apache.ambari.server.state.ServiceComponent; @@ -101,13 +96,11 @@ import org.apache.ambari.server.state.alert.Scope; import org.apache.ambari.server.state.alert.SourceType; import org.apache.ambari.server.state.cluster.ClustersImpl; -import org.easymock.EasyMock; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.security.crypto.password.PasswordEncoder; -import com.google.common.collect.Lists; import com.google.inject.Inject; import com.google.inject.Injector; import com.google.inject.Provider; @@ -151,10 +144,6 @@ public class OrmTestHelper { @Inject private StackDAO stackDAO; - @Inject - MpackDAO mpackDAO; - - private static final StackId HDP_206 = new StackId("HDP", "2.0.6"); public static final StackId STACK_ID = new StackId("HDP", "2.2.0"); public static final String CLUSTER_NAME = "test_cluster1"; public static final String SERVICE_GROUP_NAME = "CORE"; @@ -336,51 +325,10 @@ public void createStageCommands() { hostDAO.merge(host2); } - @Transactional - public MpackEntity createMpack(StackId stackId) throws AmbariException { - List<MpackEntity> mpackEntities = - mpackDAO.findByNameVersion(stackId.getStackName(), stackId.getStackVersion()); - MpackEntity mpackEntity = !mpackEntities.isEmpty() ? 
mpackEntities.get(0) : null; - if (mpackEntities.isEmpty()) { - mpackEntity = new MpackEntity(); - mpackEntity.setMpackName(stackId.getStackName()); - mpackEntity.setMpackVersion(stackId.getStackVersion()); - mpackEntity.setMpackUri("http://mpacks.org/" + stackId.toString() + ".json"); - mpackDAO.create(mpackEntity); - - AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class); - - ArrayList<Module> packletArrayList = new ArrayList<>(); - ModuleComponent sampleComponent = EasyMock.createNiceMock(ModuleComponent.class); - EasyMock.expect(sampleComponent.getName()).andReturn("FOO_COMPONENT").anyTimes(); - EasyMock.expect(sampleComponent.getVersion()).andReturn("1.0.0-b1").anyTimes(); - - Module samplePacklet = EasyMock.createNiceMock(Module.class); - EasyMock.expect(samplePacklet.getVersion()).andReturn("1.0.0-b1").anyTimes(); - EasyMock.expect(samplePacklet.getName()).andReturn("FOO").anyTimes(); - EasyMock.expect(samplePacklet.getDefinition()).andReturn("foo.tar.gz").anyTimes(); - EasyMock.expect(samplePacklet.getComponents()).andReturn(Lists.newArrayList(sampleComponent)).anyTimes(); - EasyMock.expect(samplePacklet.getModuleComponent(EasyMock.anyString())).andReturn(sampleComponent).anyTimes(); - - packletArrayList.add(samplePacklet); - - Map<Long, Mpack> mpackMap = ambariMetaInfo.getMpackManager().getMpackMap(); - Mpack mpack = EasyMock.createNiceMock(Mpack.class); - EasyMock.expect(mpack.getMpackId()).andReturn(stackId.getStackName()).anyTimes(); - EasyMock.expect(mpack.getResourceId()).andReturn(mpackEntity.getId()).anyTimes(); - EasyMock.expect(mpack.getModules()).andReturn(packletArrayList).anyTimes(); - EasyMock.expect(mpack.getModule(EasyMock.anyString())).andReturn(samplePacklet).anyTimes(); - EasyMock.expect(mpack.getModuleComponent(EasyMock.anyString(), EasyMock.anyString())).andReturn(sampleComponent).anyTimes(); - - EasyMock.replay(mpack, samplePacklet, sampleComponent); - mpackMap.put(mpackEntity.getId(), mpack); - } - return mpackEntity; - } - @Transactional public StackEntity createStack(StackId stackId) throws AmbariException { StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion()); + if (null == stackEntity) { stackEntity = new StackEntity(); stackEntity.setStackName(stackId.getStackName()); @@ -388,18 +336,6 @@ public StackEntity createStack(StackId stackId) throws AmbariException { stackDAO.create(stackEntity); } - if (null == stackEntity.getMpackId()) { - List<MpackEntity> mpackEntities = - mpackDAO.findByNameVersion(stackId.getStackName(), stackId.getStackVersion()); - - if (!mpackEntities.isEmpty()) { - stackEntity.setMpackId(mpackEntities.get(0).getId()); - } - - stackEntity = stackDAO.merge(stackEntity); - } - - return stackEntity; } @@ -436,7 +372,7 @@ public Long createCluster(String clusterName) throws Exception { ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class); StackDAO stackDAO = injector.getInstance(StackDAO.class); - StackEntity stackEntity = stackDAO.find(HDP_206); + StackEntity stackEntity = stackDAO.find("HDP", "2.0.6"); assertNotNull(stackEntity); ClusterEntity clusterEntity = new ClusterEntity(); @@ -474,12 +410,13 @@ public Cluster buildNewCluster(Clusters clusters, ServiceFactory serviceFactory, ServiceComponentFactory componentFactory, ServiceComponentHostFactory schFactory, String hostName) throws Exception { String clusterName = "cluster-" + System.currentTimeMillis(); + StackId stackId = new StackId("HDP", "2.0.6"); - createStack(HDP_206); + createStack(stackId); - clusters.addCluster(clusterName, HDP_206); + 
clusters.addCluster(clusterName, stackId); Cluster cluster = clusters.getCluster(clusterName); - ServiceGroup serviceGroup = cluster.addServiceGroup(SERVICE_GROUP_NAME, HDP_206.getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup(SERVICE_GROUP_NAME, stackId.getStackId()); cluster = initializeClusterWithStack(cluster); addHost(clusters, cluster, hostName); @@ -490,8 +427,9 @@ public Cluster buildNewCluster(Clusters clusters, } public Cluster initializeClusterWithStack(Cluster cluster) throws Exception { - cluster.setDesiredStackVersion(HDP_206); - getOrCreateRepositoryVersion(HDP_206, HDP_206.getStackVersion()); + StackId stackId = new StackId("HDP", "2.0.6"); + cluster.setDesiredStackVersion(stackId); + getOrCreateRepositoryVersion(stackId, stackId.getStackVersion()); return cluster; } @@ -531,7 +469,7 @@ public void installHdfsService(Cluster cluster, Service service = cluster.getService(serviceName); assertNotNull(service); - ServiceComponent datanode = componentFactory.createNew(service, "DATANODE", "DATANODE"); + ServiceComponent datanode = componentFactory.createNew(service, "DATANODE"); service.addServiceComponent(datanode); datanode.setDesiredState(State.INSTALLED); @@ -542,7 +480,7 @@ public void installHdfsService(Cluster cluster, sch.setDesiredState(State.INSTALLED); sch.setState(State.INSTALLED); - ServiceComponent namenode = componentFactory.createNew(service, "NAMENODE", "NAMENODE"); + ServiceComponent namenode = componentFactory.createNew(service, "NAMENODE"); service.addServiceComponent(namenode); namenode.setDesiredState(State.INSTALLED); @@ -566,7 +504,7 @@ public void installYarnService(Cluster cluster, assertNotNull(service); ServiceComponent resourceManager = componentFactory.createNew(service, - "RESOURCEMANAGER", "RESOURCEMANAGER"); + "RESOURCEMANAGER"); service.addServiceComponent(resourceManager); resourceManager.setDesiredState(State.INSTALLED); @@ -730,13 +668,8 @@ public RepositoryVersionEntity getOrCreateRepositoryVersion(Cluster cluster) { */ public RepositoryVersionEntity getOrCreateRepositoryVersion(StackId stackId, String version) { - MpackEntity mpackEntity = null; StackEntity stackEntity = null; try { - // creating mpack before stack makes - mpackEntity = createMpack(stackId); - - // sure stack will be linked to mpack stackEntity = createStack(stackId); } catch (Exception e) { LOG.error("Expected successful repository", e); @@ -763,13 +696,8 @@ public RepositoryVersionEntity getOrCreateRepositoryVersion(StackId stackId, repoOsEntity.setAmbariManaged(true); repoOsEntity.addRepoDefinition(repoDefinitionEntity1); repoOsEntity.addRepoDefinition(repoDefinitionEntity2); - repoOsEntity.setMpackEntity(createMpack(stackId)); operatingSystems.add(repoOsEntity); - mpackEntity.setRepositoryOperatingSystems(operatingSystems); - mpackEntity = mpackDAO.merge(mpackEntity); - - operatingSystems = mpackEntity.getRepositoryOperatingSystems(); repositoryVersion = repositoryVersionDAO.create(stackEntity, version, String.valueOf(System.currentTimeMillis()) + uniqueCounter.incrementAndGet(), operatingSystems); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java index 5169dfeb6fe..c20ce18758a 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertDispatchDAOTest.java @@ -117,7 +117,7 @@ public void setup() throws 
Exception { EventBusSynchronizer.synchronizeAmbariEventPublisher(m_injector); m_cluster = m_clusters.getClusterById(m_helper.createCluster()); - serviceGroup = m_cluster.addServiceGroup("CORE", m_cluster.getDesiredStackVersion().getStackId()); + serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0"); m_helper.initializeClusterWithStack(m_cluster); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertsDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertsDAOTest.java index 7fdf5933913..2d7826bf4a4 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertsDAOTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/AlertsDAOTest.java @@ -123,7 +123,7 @@ public void setup() throws Exception { // install YARN so there is at least 1 service installed and no // unexpected alerts since the test YARN service doesn't have any alerts m_cluster = m_clusters.getClusterById(m_helper.createCluster()); - serviceGroup = m_cluster.addServiceGroup("CORE", m_cluster.getDesiredStackVersion().getStackId()); + serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0"); m_helper.initializeClusterWithStack(m_cluster); m_helper.addHost(m_clusters, m_cluster, HOSTNAME); m_helper.installYarnService(m_cluster, m_serviceFactory, diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java new file mode 100644 index 00000000000..c961fb06795 --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ambari.server.orm.dao; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.H2DatabaseCleaner; +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.orm.GuiceJpaInitializer; +import org.apache.ambari.server.orm.InMemoryDefaultTestModule; +import org.apache.ambari.server.orm.OrmTestHelper; +import org.apache.ambari.server.orm.entities.ClusterEntity; +import org.apache.ambari.server.orm.entities.HostEntity; +import org.apache.ambari.server.orm.entities.HostVersionEntity; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.orm.entities.ResourceEntity; +import org.apache.ambari.server.orm.entities.ResourceTypeEntity; +import org.apache.ambari.server.orm.entities.StackEntity; +import org.apache.ambari.server.security.authorization.ResourceType; +import org.apache.ambari.server.state.RepositoryVersionState; +import org.apache.ambari.server.state.StackId; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.Injector; + + +/** + * {@link org.apache.ambari.server.orm.dao.HostVersionDAO} unit tests. + */ +public class HostVersionDAOTest { + + private static Injector injector; + private ResourceTypeDAO resourceTypeDAO; + private ClusterDAO clusterDAO; + private StackDAO stackDAO; + private HostDAO hostDAO; + private HostVersionDAO hostVersionDAO; + private OrmTestHelper helper; + + private final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0"); + private final static StackId BAD_STACK = new StackId("BADSTACK", "1.0"); + + private final static String repoVersion_2200 = "2.2.0.0-1"; + private final static String repoVersion_2201 = "2.2.0.1-2"; + private final static String repoVersion_2202 = "2.2.0.2-3"; + + @Before + public void before() { + injector = Guice.createInjector(new InMemoryDefaultTestModule()); + H2DatabaseCleaner.resetSequences(injector); + injector.getInstance(GuiceJpaInitializer.class); + + resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class); + clusterDAO = injector.getInstance(ClusterDAO.class); + stackDAO = injector.getInstance(StackDAO.class); + hostDAO = injector.getInstance(HostDAO.class); + hostVersionDAO = injector.getInstance(HostVersionDAO.class); + helper = injector.getInstance(OrmTestHelper.class); + + // required to populate the database with stacks + injector.getInstance(AmbariMetaInfo.class); + + createDefaultData(); + } + + /** + * Helper function to bootstrap some basic data about clusters, cluster version, host, and host versions. 
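+ * Creates cluster "test_cluster1" on the HDP 2.2.0 stack with hosts test_host1..test_host3, each given one host version for 2.2.0.0-1 (CURRENT on test_host1, INSTALLED on the other two).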
+ */ + private void createDefaultData() { + StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion()); + Assert.assertNotNull(stackEntity); + + // Create the cluster + ResourceTypeEntity resourceTypeEntity = new ResourceTypeEntity(); + resourceTypeEntity.setId(ResourceType.CLUSTER.getId()); + resourceTypeEntity.setName(ResourceType.CLUSTER.name()); + resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity); + + ResourceEntity resourceEntity = new ResourceEntity(); + resourceEntity.setResourceType(resourceTypeEntity); + + ClusterEntity clusterEntity = new ClusterEntity(); + clusterEntity.setClusterName("test_cluster1"); + clusterEntity.setClusterInfo("test_cluster_info1"); + clusterEntity.setResource(resourceEntity); + clusterEntity.setDesiredStack(stackEntity); + + clusterDAO.create(clusterEntity); + + RepositoryVersionEntity repoVersionEntity = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200); + + // Create the hosts + HostEntity host1 = new HostEntity(); + HostEntity host2 = new HostEntity(); + HostEntity host3 = new HostEntity(); + + host1.setHostName("test_host1"); + host2.setHostName("test_host2"); + host3.setHostName("test_host3"); + host1.setIpv4("192.168.0.1"); + host2.setIpv4("192.168.0.2"); + host3.setIpv4("192.168.0.3"); + + List<HostEntity> hostEntities = new ArrayList<>(); + hostEntities.add(host1); + hostEntities.add(host2); + hostEntities.add(host3); + + // Both sides of the relation should be set when modifying it at runtime + host1.setClusterEntities(Arrays.asList(clusterEntity)); + host2.setClusterEntities(Arrays.asList(clusterEntity)); + host3.setClusterEntities(Arrays.asList(clusterEntity)); + + hostDAO.create(host1); + hostDAO.create(host2); + hostDAO.create(host3); + + clusterEntity.setHostEntities(hostEntities); + clusterDAO.merge(clusterEntity); + + // Create the Host Versions + HostVersionEntity hostVersionEntity1 = new HostVersionEntity(host1, repoVersionEntity, RepositoryVersionState.CURRENT); + HostVersionEntity hostVersionEntity2 = new HostVersionEntity(host2, repoVersionEntity, RepositoryVersionState.INSTALLED); + HostVersionEntity hostVersionEntity3 = new HostVersionEntity(host3, repoVersionEntity, RepositoryVersionState.INSTALLED); + + hostVersionDAO.create(hostVersionEntity1); + hostVersionDAO.create(hostVersionEntity2); + hostVersionDAO.create(hostVersionEntity3); + } + + /** + * Helper function to bootstrap additional data on top of the default data. + */ + private void addMoreVersions() { + ClusterEntity clusterEntity = clusterDAO.findByName("test_cluster1"); + + RepositoryVersionEntity repositoryVersionEnt_2_2_0_1 = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2201); + + + HostEntity[] hostEntities = clusterEntity.getHostEntities().toArray(new HostEntity[clusterEntity.getHostEntities().size()]); + // Must sort by host name in ascending order to ensure that state is accurately set later on. 
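+ // Arrays.sort relies on HostEntity's natural ordering (by host name); the INSTALLED/INSTALLING/INSTALL_FAILED states assigned per host index below depend on this order.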
+ Arrays.sort(hostEntities); + + // For each of the hosts, add a host version + for (HostEntity host : hostEntities) { + HostVersionEntity hostVersionEntity = new HostVersionEntity(host, repositoryVersionEnt_2_2_0_1, RepositoryVersionState.INSTALLED); + hostVersionDAO.create(hostVersionEntity); + } + + // For each of the hosts, add one more host version + RepositoryVersionEntity repositoryVersionEnt_2_2_0_2 = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2202); + for (int i = 0; i < hostEntities.length; i++) { + RepositoryVersionState desiredState = null; + if (i % 3 == 0) { + desiredState = RepositoryVersionState.INSTALLED; + } + if (i % 3 == 1) { + desiredState = RepositoryVersionState.INSTALLING; + } + if (i % 3 == 2) { + desiredState = RepositoryVersionState.INSTALL_FAILED; + } + + + HostVersionEntity hostVersionEntity = new HostVersionEntity(hostEntities[i], repositoryVersionEnt_2_2_0_2, desiredState); + hostVersionDAO.create(hostVersionEntity); + } + } + + /** + * Test the {@link HostVersionDAO#findAll()} method. + */ + @Test + public void testFindAll() { + Assert.assertEquals(3, hostVersionDAO.findAll().size()); + } + + /** + * Test the {@link HostVersionDAO#findByHost(String)} method. + */ + @Test + public void testFindByHost() { + Assert.assertEquals(1, hostVersionDAO.findByHost("test_host1").size()); + Assert.assertEquals(1, hostVersionDAO.findByHost("test_host2").size()); + Assert.assertEquals(1, hostVersionDAO.findByHost("test_host3").size()); + + addMoreVersions(); + + Assert.assertEquals(3, hostVersionDAO.findByHost("test_host1").size()); + Assert.assertEquals(3, hostVersionDAO.findByHost("test_host2").size()); + Assert.assertEquals(3, hostVersionDAO.findByHost("test_host3").size()); + } + + /** + * Test the {@link HostVersionDAO#findByClusterStackAndVersion(String, org.apache.ambari.server.state.StackId, String)} method. + */ + @Test + public void testFindByClusterStackAndVersion() { + Assert.assertEquals(3, hostVersionDAO.findByClusterStackAndVersion("test_cluster1", HDP_22_STACK, repoVersion_2200).size()); + Assert.assertEquals(3, hostVersionDAO.findAll().size()); + + addMoreVersions(); + + Assert.assertEquals(3, hostVersionDAO.findByClusterStackAndVersion("test_cluster1", HDP_22_STACK, repoVersion_2201).size()); + Assert.assertEquals(3, hostVersionDAO.findByClusterStackAndVersion("test_cluster1", HDP_22_STACK, repoVersion_2202).size()); + Assert.assertEquals(9, hostVersionDAO.findAll().size()); + } + + /** + * Test the {@link HostVersionDAO#findByClusterAndHost(String, String)} method. + */ + @Test + public void testFindByClusterAndHost() { + Assert.assertEquals(1, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host1").size()); + Assert.assertEquals(1, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host2").size()); + Assert.assertEquals(1, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host3").size()); + + addMoreVersions(); + + Assert.assertEquals(3, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host1").size()); + Assert.assertEquals(3, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host2").size()); + Assert.assertEquals(3, hostVersionDAO.findByClusterAndHost("test_cluster1", "test_host3").size()); + } + + /** + * Test the {@link HostVersionDAO#findByCluster(String)} method. 
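+ * Expects 3 host versions before addMoreVersions() (one per host) and 9 afterwards (three per host).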
+ */ + @Test + public void testFindByCluster() { + Assert.assertEquals(3, hostVersionDAO.findByCluster("test_cluster1").size()); + + addMoreVersions(); + + Assert.assertEquals(9, hostVersionDAO.findByCluster("test_cluster1").size()); + } + + /** + * Test the {@link HostVersionDAO#findByClusterHostAndState(String, String, org.apache.ambari.server.state.RepositoryVersionState)} method. + */ + @Test + public void testFindByClusterHostAndState() { + Assert.assertEquals(1, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host1", RepositoryVersionState.CURRENT).size()); + Assert.assertEquals(0, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host1", RepositoryVersionState.INSTALLED).size()); + Assert.assertEquals(0, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host2", RepositoryVersionState.INSTALLING).size()); + Assert.assertEquals(0, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host3", RepositoryVersionState.INSTALL_FAILED).size()); + + addMoreVersions(); + + Assert.assertEquals(2, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host1", RepositoryVersionState.INSTALLED).size()); + Assert.assertEquals(2, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host2", RepositoryVersionState.INSTALLED).size()); + Assert.assertEquals(2, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host3", RepositoryVersionState.INSTALLED).size()); + + Assert.assertEquals(1, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host1", RepositoryVersionState.CURRENT).size()); + Assert.assertEquals(1, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host2", RepositoryVersionState.INSTALLING).size()); + Assert.assertEquals(1, hostVersionDAO.findByClusterHostAndState("test_cluster1", "test_host3", RepositoryVersionState.INSTALL_FAILED).size()); + } + + /** + * Test the {@link HostVersionDAO#findByClusterStackVersionAndHost(String, StackId, String, String)} method. 
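+ * Also checks that lookups against a non-existent cluster, stack, version, or host return null.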
+ */ + @Test + public void testFindByClusterStackVersionAndHost() { + HostEntity host1 = hostDAO.findByName("test_host1"); + HostEntity host2 = hostDAO.findByName("test_host2"); + HostEntity host3 = hostDAO.findByName("test_host3"); + + HostVersionEntity hostVersionEntity1 = new HostVersionEntity(host1, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200), RepositoryVersionState.CURRENT); + hostVersionEntity1.setId(1L); + HostVersionEntity hostVersionEntity2 = new HostVersionEntity(host2, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200), RepositoryVersionState.INSTALLED); + hostVersionEntity2.setId(2L); + HostVersionEntity hostVersionEntity3 = new HostVersionEntity(host3, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200), RepositoryVersionState.INSTALLED); + hostVersionEntity3.setId(3L); + + hostVersionEntity1.equals(hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2200, "test_host1")); + Assert.assertEquals(hostVersionEntity1, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2200, "test_host1")); + Assert.assertEquals(hostVersionEntity2, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2200, "test_host2")); + Assert.assertEquals(hostVersionEntity3, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2200, "test_host3")); + + // Test non-existent objects + Assert.assertEquals(null, hostVersionDAO.findByClusterStackVersionAndHost("non_existent_cluster", HDP_22_STACK, repoVersion_2200, "test_host3")); + Assert.assertEquals(null, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", BAD_STACK, repoVersion_2200, "test_host3")); + Assert.assertEquals(null, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, "non_existent_version", "test_host3")); + Assert.assertEquals(null, hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, "non_existent_version", "non_existent_host")); + + addMoreVersions(); + + // Expected + HostVersionEntity hostVersionEntity1LastExpected = new HostVersionEntity(host1, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2202), RepositoryVersionState.INSTALLED); + HostVersionEntity hostVersionEntity2LastExpected = new HostVersionEntity(host2, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2202), RepositoryVersionState.INSTALLING); + HostVersionEntity hostVersionEntity3LastExpected = new HostVersionEntity(host3, + helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2202), RepositoryVersionState.INSTALL_FAILED); + + // Actual + HostVersionEntity hostVersionEntity1LastActual = hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2202, "test_host1"); + HostVersionEntity hostVersionEntity2LastActual = hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2202, "test_host2"); + HostVersionEntity hostVersionEntity3LastActual = hostVersionDAO.findByClusterStackVersionAndHost("test_cluster1", HDP_22_STACK, repoVersion_2202, "test_host3"); + + // Trying to Mock the actual objects to override the getId() method will not work because the class that mockito creates + // is still a Mockito wrapper. Instead, take advantage of an overloaded constructor that ignores the Id. 
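+ // The copy constructor yields an entity with the same state but without the generated id, so equals() can compare expected and actual.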
+ Assert.assertEquals(hostVersionEntity1LastExpected, new HostVersionEntity(hostVersionEntity1LastActual)); + Assert.assertEquals(hostVersionEntity2LastExpected, new HostVersionEntity(hostVersionEntity2LastActual)); + Assert.assertEquals(hostVersionEntity3LastExpected, new HostVersionEntity(hostVersionEntity3LastActual)); + } + + @Test + public void testDuplicates() throws Exception { + HostEntity host1 = hostDAO.findByName("test_host1"); + + RepositoryVersionEntity repoVersion = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200); + + HostVersionEntity hostVersionEntity1 = new HostVersionEntity(host1, repoVersion, RepositoryVersionState.CURRENT); + + try { + hostVersionDAO.create(hostVersionEntity1); + Assert.fail("Each host can have a relationship to a repo version, but cannot have more than one for the same repo"); + } catch (Exception e) { + // expected + } + + } + + @After + public void after() throws AmbariException, SQLException { + H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector); + injector = null; + } +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UpgradeDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UpgradeDAOTest.java index b5d62348c28..47fde03f76b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UpgradeDAOTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/UpgradeDAOTest.java @@ -43,7 +43,6 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.UpgradeState; import org.apache.ambari.server.state.stack.upgrade.Direction; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; import org.apache.ambari.server.state.stack.upgrade.UpgradeType; import org.junit.After; import org.junit.Assert; @@ -106,8 +105,8 @@ public void setup() throws Exception { entity.setDowngradeAllowed(true); UpgradeGroupEntity group = new UpgradeGroupEntity(); + group.setName("group_name"); group.setTitle("group title"); - group.setLifecycle(LifecycleType.UPGRADE); // create 2 items List items = new ArrayList<>(); @@ -153,7 +152,7 @@ public void testFindUpgrade() throws Exception { assertNotNull(group); Assert.assertNotSame(entity.getUpgradeGroups().get(0), group); - Assert.assertNull(group.getName()); + assertEquals("group_name", group.getName()); assertEquals("group title", group.getTitle()); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java index f27d9eb3ffd..40ca1190338 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java @@ -511,7 +511,7 @@ private ServiceComponent addServiceComponent(Service service, String componentNa try { serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { - serviceComponent = serviceComponentFactory.createNew(service, componentName, componentName); + serviceComponent = serviceComponentFactory.createNew(service, componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/CreateAndConfigureActionTest.java 
b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/CreateAndConfigureActionTest.java index f6bc2d21d4f..21c43b4ac68 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/CreateAndConfigureActionTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/CreateAndConfigureActionTest.java @@ -269,7 +269,7 @@ private ServiceComponent addServiceComponent(Service service, try { serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { - serviceComponent = serviceComponentFactory.createNew(service, componentName, componentName); + serviceComponent = serviceComponentFactory.createNew(service, componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java index cef96662fc1..d71042771f0 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java @@ -18,7 +18,6 @@ package org.apache.ambari.server.stack; -import static java.util.stream.Collectors.toCollection; import static org.easymock.EasyMock.createNiceMock; import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.replay; @@ -40,8 +39,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.TreeSet; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.Role; @@ -243,7 +240,7 @@ public void testGetStack() { assertEquals("1.0", pigService.getVersion()); assertEquals("This is comment for PIG service", pigService.getComment()); components = pigService.getComponents(); - assertEquals(2, components.size()); + assertEquals(1, components.size()); CommandScriptDefinition commandScript = pigService.getCommandScript(); assertEquals("scripts/service_check.py", commandScript.getScript()); assertEquals(CommandScriptDefinition.Type.PYTHON, commandScript.getScriptType()); @@ -304,11 +301,10 @@ public void testStackVersionInheritance() { assertNotNull(si); //should include all stacks in hierarchy - assertEquals(19, services.size()); + assertEquals(18, services.size()); - Set<String> expectedServices = new TreeSet<>(); + HashSet<String> expectedServices = new HashSet<>(); expectedServices.add("GANGLIA"); - expectedServices.add("HADOOP_CLIENTS"); expectedServices.add("HBASE"); expectedServices.add("HCATALOG"); expectedServices.add("HDFS"); @@ -327,13 +323,12 @@ public void testStackVersionInheritance() { expectedServices.add("SPARK3"); expectedServices.add("SYSTEMML"); - assertEquals(expectedServices, services.stream().map(ServiceInfo::getName).collect(toCollection(TreeSet::new))); ServiceInfo pigService = null; for (ServiceInfo service : services) { if (service.getName().equals("PIG")) { pigService = service; } - assertTrue(service.getName(), expectedServices.remove(service.getName())); + assertTrue(expectedServices.remove(service.getName())); } assertTrue(expectedServices.isEmpty()); @@ -529,7 +524,7 @@ public void testPackageInheritance() throws Exception{ public void testMonitoringServicePropertyInheritance() throws Exception{ StackInfo stack = stackManager.getStack("HDP", "2.0.8"); Collection<ServiceInfo> allServices = stack.getServices(); - assertEquals(16, allServices.size()); + 
assertEquals(15, allServices.size()); boolean monitoringServiceFound = false; @@ -550,10 +545,9 @@ public void testServiceDeletion() { StackInfo stack = stackManager.getStack("HDP", "2.0.6"); Collection<ServiceInfo> allServices = stack.getServices(); - assertEquals(13, allServices.size()); + assertEquals(12, allServices.size()); HashSet<String> expectedServices = new HashSet<>(); expectedServices.add("GANGLIA"); - expectedServices.add("HADOOP_CLIENTS"); expectedServices.add("HBASE"); expectedServices.add("HCATALOG"); expectedServices.add("HDFS"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java new file mode 100644 index 00000000000..ad63eb783db --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.stack; + + +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.orm.InMemoryDefaultTestModule; +import org.apache.ambari.server.orm.dao.ClusterDAO; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; +import org.apache.ambari.server.orm.entities.ClusterEntity; +import org.apache.ambari.server.orm.entities.ClusterServiceEntity; +import org.apache.ambari.server.orm.entities.RepoDefinitionEntity; +import org.apache.ambari.server.orm.entities.RepoOsEntity; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity; +import org.apache.ambari.server.orm.entities.StackEntity; +import org.apache.ambari.server.state.RepositoryInfo; +import org.apache.ambari.server.state.StackInfo; +import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; +import org.junit.Test; +import org.mockito.Mockito; + +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableList; +import com.google.common.io.Resources; +import com.google.gson.Gson; +import com.google.inject.Guice; +import com.google.inject.Provider; + +/** + * Unit test for {@link UpdateActiveRepoVersionOnStartup} + */ +public class UpdateActiveRepoVersionOnStartupTest { + + private static String CLUSTER_NAME = "c1"; + private static String ADD_ON_REPO_ID = "MSFT_R-8.0"; + + private RepositoryVersionDAO 
repositoryVersionDao; + private UpdateActiveRepoVersionOnStartup activeRepoUpdater; + + @Test + public void addAServiceRepoToExistingRepoVersion() throws Exception { + init(true); + activeRepoUpdater.process(); + verifyRepoIsAdded(); + } + + @Test + public void missingClusterVersionShouldNotCauseException() throws Exception { + init(false); + activeRepoUpdater.process(); + } + + /** + * Verifies that the add-on service repo was added to the repo version entity, in both its JSON and XML representations. + * + * @throws Exception + */ + private void verifyRepoIsAdded() throws Exception { + verify(repositoryVersionDao, atLeast(1)).merge(Mockito.any(RepositoryVersionEntity.class)); + } + + public void init(boolean addClusterVersion) throws Exception { + ClusterDAO clusterDao = mock(ClusterDAO.class); + + repositoryVersionDao = mock(RepositoryVersionDAO.class); + + final RepositoryVersionHelper repositoryVersionHelper = new RepositoryVersionHelper(); + Field field = RepositoryVersionHelper.class.getDeclaredField("gson"); + field.setAccessible(true); + field.set(repositoryVersionHelper, new Gson()); + + final AmbariMetaInfo metaInfo = mock(AmbariMetaInfo.class); + + StackManager stackManager = mock(StackManager.class); + when(metaInfo.getStackManager()).thenReturn(stackManager); + + ClusterEntity cluster = new ClusterEntity(); + cluster.setClusterName(CLUSTER_NAME); + when(clusterDao.findAll()).thenReturn(ImmutableList.of(cluster)); + + StackEntity stackEntity = new StackEntity(); + stackEntity.setStackName("HDP"); + stackEntity.setStackVersion("2.3"); + cluster.setDesiredStack(stackEntity); + + RepositoryVersionEntity desiredRepositoryVersion = new RepositoryVersionEntity(); + desiredRepositoryVersion.setStack(stackEntity); + + List<RepoOsEntity> operatingSystems = new ArrayList<>(); + RepoDefinitionEntity repoDefinitionEntity1 = new RepoDefinitionEntity(); + repoDefinitionEntity1.setRepoID("HDP-UTILS-1.1.0.20"); + repoDefinitionEntity1.setBaseUrl("http://192.168.99.100/repos/HDP-UTILS-1.1.0.20/"); + repoDefinitionEntity1.setRepoName("HDP-UTILS"); + RepoDefinitionEntity repoDefinitionEntity2 = new RepoDefinitionEntity(); + repoDefinitionEntity2.setRepoID("HDP-2.4"); + repoDefinitionEntity2.setBaseUrl("http://192.168.99.100/repos/HDP-2.4.0.0/"); + repoDefinitionEntity2.setRepoName("HDP"); + RepoOsEntity repoOsEntity1 = new RepoOsEntity(); + repoOsEntity1.setFamily("redhat6"); + repoOsEntity1.setAmbariManaged(true); + repoOsEntity1.addRepoDefinition(repoDefinitionEntity1); + repoOsEntity1.addRepoDefinition(repoDefinitionEntity2); + operatingSystems.add(repoOsEntity1); + RepoDefinitionEntity repoDefinitionEntity3 = new RepoDefinitionEntity(); + repoDefinitionEntity3.setRepoID("HDP-UTILS-1.1.0.20"); + repoDefinitionEntity3.setBaseUrl("http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos7"); + repoDefinitionEntity3.setRepoName("HDP-UTILS"); + RepoDefinitionEntity repoDefinitionEntity4 = new RepoDefinitionEntity(); + repoDefinitionEntity4.setRepoID("HDP-2.4"); + repoDefinitionEntity4.setBaseUrl("http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/2.x/BUILDS/2.4.3.0-207"); + repoDefinitionEntity4.setRepoName("HDP"); + RepoOsEntity repoOsEntity2 = new RepoOsEntity(); + repoOsEntity2.setFamily("redhat7"); + repoOsEntity2.setAmbariManaged(true); + repoOsEntity2.addRepoDefinition(repoDefinitionEntity3); + repoOsEntity2.addRepoDefinition(repoDefinitionEntity4); + operatingSystems.add(repoOsEntity2); + + desiredRepositoryVersion.addRepoOsEntities(operatingSystems); + + ServiceDesiredStateEntity 
serviceDesiredStateEntity = new ServiceDesiredStateEntity(); + serviceDesiredStateEntity.setDesiredRepositoryVersion(desiredRepositoryVersion); + + ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity(); + clusterServiceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity); + cluster.setClusterServiceEntities(Collections.singletonList(clusterServiceEntity)); + + StackInfo stackInfo = new StackInfo(); + stackInfo.setName("HDP"); + stackInfo.setVersion("2.3"); + + RepositoryInfo repositoryInfo = new RepositoryInfo(); + repositoryInfo.setBaseUrl("http://msft.r"); + repositoryInfo.setRepoId(ADD_ON_REPO_ID); + repositoryInfo.setRepoName("MSFT_R"); + repositoryInfo.setOsType("redhat6"); + stackInfo.getRepositories().add(repositoryInfo); + + when(stackManager.getStack("HDP", "2.3")).thenReturn(stackInfo); + + final Provider<RepositoryVersionHelper> repositoryVersionHelperProvider = mock(Provider.class); + when(repositoryVersionHelperProvider.get()).thenReturn(repositoryVersionHelper); + + + InMemoryDefaultTestModule testModule = new InMemoryDefaultTestModule() { + @Override + protected void configure() { + bind(RepositoryVersionHelper.class).toProvider(repositoryVersionHelperProvider); + bind(AmbariMetaInfo.class).toProvider(new Provider<AmbariMetaInfo>() { + @Override + public AmbariMetaInfo get() { + return metaInfo; + } + }); + + requestStaticInjection(RepositoryVersionEntity.class); + } + }; + + Guice.createInjector(testModule); + if (addClusterVersion) { + + RepositoryInfo info = new RepositoryInfo(); + info.setBaseUrl("http://msft.r"); + info.setRepoId(ADD_ON_REPO_ID); + info.setRepoName("MSFT_R1"); + info.setOsType("redhat6"); + stackInfo.getRepositories().add(info); + } + + activeRepoUpdater = new UpdateActiveRepoVersionOnStartup(clusterDao, + repositoryVersionDao, repositoryVersionHelper, metaInfo); + } + + private static String resourceAsString(String resourceName) throws IOException { + return Resources.toString(Resources.getResource(resourceName), Charsets.UTF_8); + } + +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java index 6194085eb23..1296be987ae 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java @@ -27,17 +27,11 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer; import org.apache.ambari.server.orm.InMemoryDefaultTestModule; import org.apache.ambari.server.orm.OrmTestHelper; -import org.apache.ambari.server.orm.dao.ClusterDAO; -import org.apache.ambari.server.orm.dao.ClusterServiceDAO; import org.apache.ambari.server.orm.dao.ConfigGroupDAO; import org.apache.ambari.server.orm.dao.ConfigGroupHostMappingDAO; -import org.apache.ambari.server.orm.dao.ServiceGroupDAO; -import org.apache.ambari.server.orm.entities.ClusterEntity; -import org.apache.ambari.server.orm.entities.ClusterServiceEntity; import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity; import org.apache.ambari.server.orm.entities.ConfigGroupEntity; import org.apache.ambari.server.orm.entities.ConfigGroupHostMappingEntity; -import org.apache.ambari.server.orm.entities.ServiceGroupEntity; import org.apache.ambari.server.state.configgroup.ConfigGroup; import org.apache.ambari.server.state.configgroup.ConfigGroupFactory; import org.junit.After; @@ -60,9 +54,6 @@ public class ConfigGroupTest { private ConfigFactory configFactory; private ConfigGroupDAO 
configGroupDAO; private ConfigGroupHostMappingDAO configGroupHostMappingDAO; - private ClusterDAO clusterDAO; - private ClusterServiceDAO clusterServiceDAO; - private ServiceGroupDAO serviceGroupDAO; @Before public void setup() throws Exception { @@ -74,9 +65,6 @@ public void setup() throws Exception { configGroupDAO = injector.getInstance(ConfigGroupDAO.class); configGroupHostMappingDAO = injector.getInstance (ConfigGroupHostMappingDAO.class); - clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class); - clusterDAO = injector.getInstance(ClusterDAO.class); - serviceGroupDAO = injector.getInstance(ServiceGroupDAO.class); StackId stackId = new StackId("HDP-0.1"); OrmTestHelper helper = injector.getInstance(OrmTestHelper.class); @@ -117,22 +105,7 @@ ConfigGroup createConfigGroup() throws AmbariException { configs.put(config.getType(), config); hosts.put(host.getHostId(), host); - ClusterEntity clusterEntity = clusterDAO.findByName("foo"); - - ServiceGroupEntity serviceGroupEntity = new ServiceGroupEntity(); - serviceGroupEntity.setClusterEntity(clusterEntity); - serviceGroupEntity.setServiceGroupName("default"); - serviceGroupEntity.setStack(clusterEntity.getDesiredStack()); - serviceGroupDAO.create(serviceGroupEntity); - - ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity(); - clusterServiceEntity.setClusterEntity(clusterEntity); - clusterServiceEntity.setServiceGroupEntity(serviceGroupEntity); - clusterServiceEntity.setServiceName("HDFS"); - clusterServiceEntity.setServiceType("HDFS"); - clusterServiceDAO.create(clusterServiceEntity); - - ConfigGroup configGroup = configGroupFactory.createNew(cluster, 1L, clusterServiceEntity.getServiceId(), "cg-test", "HDFS", "New HDFS configs for h1", configs, hosts); + ConfigGroup configGroup = configGroupFactory.createNew(cluster, 1L, 1L, "HDFS", "", "New HDFS configs for h1", configs, hosts); cluster.addConfigGroup(configGroup); return configGroup; diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java index 75ac685c6ed..4a4541d6cf7 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java @@ -18,12 +18,15 @@ package org.apache.ambari.server.state; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.apache.ambari.server.AmbariException; @@ -35,12 +38,16 @@ import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO; import org.apache.ambari.server.orm.dao.HostComponentStateDAO; import org.apache.ambari.server.orm.dao.HostDAO; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO; +import org.apache.ambari.server.orm.dao.StackDAO; import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity; import org.apache.ambari.server.orm.entities.HostComponentStateEntity; import org.apache.ambari.server.orm.entities.HostEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity; +import 
org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity; +import org.apache.ambari.server.orm.entities.StackEntity; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -109,7 +116,7 @@ public void teardown() throws AmbariException, SQLException { public void testCreateServiceComponent() throws AmbariException { String componentName = "DATANODE2"; ServiceComponent component = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); service.addServiceComponent(component); ServiceComponent sc = service.getServiceComponent(componentName); @@ -131,7 +138,7 @@ public void testCreateServiceComponent() throws AmbariException { public void testGetAndSetServiceComponentInfo() throws AmbariException { String componentName = "NAMENODE"; ServiceComponent component = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); service.addServiceComponent(component); ServiceComponent sc = service.getServiceComponent(componentName); @@ -155,7 +162,7 @@ public void testGetAndSetServiceComponentInfo() throws AmbariException { long serviceId = 1; ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( - cluster.getClusterId(), serviceGroupId, serviceId, componentName, componentName); + cluster.getClusterId(), serviceGroupId, serviceId, componentName); ServiceComponent sc1 = serviceComponentFactory.createExisting(service, serviceComponentDesiredStateEntity); @@ -193,7 +200,7 @@ private void addHostToCluster(String hostname, @Test public void testAddAndGetServiceComponentHosts() throws AmbariException { String componentName = "NAMENODE"; - ServiceComponent component = serviceComponentFactory.createNew(service, componentName, componentName); + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); service.addServiceComponent(component); ServiceComponent sc = service.getServiceComponent(componentName); @@ -246,13 +253,18 @@ public void testAddAndGetServiceComponentHosts() throws AmbariException { long serviceGroupId = 1; long serviceId = 1; - long componentId = 1; HostComponentDesiredStateEntity desiredStateEntity = - desiredStateDAO.findByIndex(componentId); + desiredStateDAO.findByIndex( + cluster.getClusterId(), + serviceGroupId, + serviceId, + componentName, + hostEntity1.getHostId() + ); HostComponentStateEntity stateEntity = liveStateDAO.findByIndex(cluster.getClusterId(), - serviceGroupId, serviceId, componentId, hostEntity1.getHostId()); + serviceGroupId, serviceId, componentName, hostEntity1.getHostId()); ServiceComponentHost sch = serviceComponentHostFactory.createExisting(sc, stateEntity, desiredStateEntity); @@ -266,7 +278,7 @@ public void testAddAndGetServiceComponentHosts() throws AmbariException { @Test public void testConvertToResponse() throws AmbariException { String componentName = "NAMENODE"; - ServiceComponent component = serviceComponentFactory.createNew(service, componentName, componentName); + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); service.addServiceComponent(component); addHostToCluster("h1", service.getCluster().getClusterName()); @@ -327,7 +339,7 @@ public void testConvertToResponse() throws AmbariException { public void testCanBeRemoved() throws Exception { String componentName = "NAMENODE"; ServiceComponent component = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); addHostToCluster("h1", 
service.getCluster().getClusterName()); ServiceComponentHost sch = serviceComponentHostFactory.createNew(component, "h1"); component.addServiceComponentHost(sch); @@ -355,7 +367,7 @@ public void testServiceComponentRemove() throws AmbariException { ServiceComponentDesiredStateDAO.class); String componentName = "NAMENODE"; - ServiceComponent component = serviceComponentFactory.createNew(service, componentName, componentName); + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); service.addServiceComponent(component); ServiceComponent sc = service.getServiceComponent(componentName); @@ -368,7 +380,7 @@ public void testServiceComponentRemove() throws AmbariException { long serviceId = 1; ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( - cluster.getClusterId(), serviceGroupId, serviceId, componentName, componentName); + cluster.getClusterId(), serviceGroupId, serviceId, componentName); Assert.assertNotNull(serviceComponentDesiredStateEntity); @@ -408,8 +420,216 @@ public void testServiceComponentRemove() throws AmbariException { // verify history is gone, too serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( - cluster.getClusterId(), serviceGroupId, serviceId, componentName, componentName); + cluster.getClusterId(), serviceGroupId, serviceId, componentName); Assert.assertNull(serviceComponentDesiredStateEntity); } + + @Test + public void testVersionCreation() throws Exception { + ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance( + ServiceComponentDesiredStateDAO.class); + + String componentName = "NAMENODE"; + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); + service.addServiceComponent(component); + + ServiceComponent sc = service.getServiceComponent(componentName); + Assert.assertNotNull(sc); + + sc.setDesiredState(State.INSTALLED); + Assert.assertEquals(State.INSTALLED, sc.getDesiredState()); + + long serviceGroupId = 1; + long serviceId = 1; + + ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + cluster.getClusterId(), serviceGroupId, serviceId, componentName); + + StackDAO stackDAO = injector.getInstance(StackDAO.class); + StackEntity stackEntity = stackDAO.find("HDP", "2.2.0"); + + RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0", + "2.2.0.1-1111", new ArrayList<>()); + + RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class); + repositoryDAO.create(rve); + + sc.setDesiredRepositoryVersion(rve); + + Assert.assertEquals(rve, sc.getDesiredRepositoryVersion()); + + Assert.assertEquals(new StackId("HDP", "2.2.0"), sc.getDesiredStackId()); + + Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackId().getStackId()); + + Assert.assertNotNull(serviceComponentDesiredStateEntity); + + ServiceComponentVersionEntity version = new ServiceComponentVersionEntity(); + version.setState(RepositoryVersionState.CURRENT); + version.setRepositoryVersion(rve); + version.setUserName("user"); + serviceComponentDesiredStateEntity.addVersion(version); + + serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge( + serviceComponentDesiredStateEntity); + + serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + cluster.getClusterId(), serviceGroupId, serviceId, componentName); + + assertEquals(1, 
serviceComponentDesiredStateEntity.getVersions().size()); + ServiceComponentVersionEntity persistedVersion = serviceComponentDesiredStateEntity.getVersions().iterator().next(); + + assertEquals(RepositoryVersionState.CURRENT, persistedVersion.getState()); + } + + @Test + public void testVersionRemoval() throws Exception { + ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance( + ServiceComponentDesiredStateDAO.class); + + String componentName = "NAMENODE"; + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); + service.addServiceComponent(component); + + ServiceComponent sc = service.getServiceComponent(componentName); + Assert.assertNotNull(sc); + + sc.setDesiredState(State.INSTALLED); + Assert.assertEquals(State.INSTALLED, sc.getDesiredState()); + + long serviceGroupId = 1; + long serviceId = 1; + + ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + cluster.getClusterId(), serviceGroupId, serviceId, componentName); + + StackDAO stackDAO = injector.getInstance(StackDAO.class); + StackEntity stackEntity = stackDAO.find("HDP", "2.2.0"); + + RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0", + "2.2.0.1-1111", new ArrayList<>()); + + RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class); + repositoryDAO.create(rve); + + sc.setDesiredRepositoryVersion(rve); + + StackId stackId = sc.getDesiredStackId(); + Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId); + + Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackId().getStackId()); + + Assert.assertNotNull(serviceComponentDesiredStateEntity); + + ServiceComponentVersionEntity version = new ServiceComponentVersionEntity(); + version.setState(RepositoryVersionState.CURRENT); + version.setRepositoryVersion(rve); + version.setUserName("user"); + serviceComponentDesiredStateEntity.addVersion(version); + + serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge( + serviceComponentDesiredStateEntity); + + serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + cluster.getClusterId(), serviceGroupId, serviceId, componentName); + + assertEquals(1, serviceComponentDesiredStateEntity.getVersions().size()); + ServiceComponentVersionEntity persistedVersion = serviceComponentDesiredStateEntity.getVersions().iterator().next(); + + assertEquals(RepositoryVersionState.CURRENT, persistedVersion.getState()); + + sc.delete(); + + serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName( + cluster.getClusterId(), serviceGroupId, serviceId, componentName); + Assert.assertNull(serviceComponentDesiredStateEntity); + + + // verify versions are gone, too + List<ServiceComponentVersionEntity> list = serviceComponentDesiredStateDAO.findVersions(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(0, list.size()); + } + + + @Test + public void testUpdateStates() throws Exception { + ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance( + ServiceComponentDesiredStateDAO.class); + + String componentName = "NAMENODE"; + + ServiceComponent component = serviceComponentFactory.createNew(service, componentName); + + StackId newStackId = new StackId("HDP-2.2.0"); + RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId, + newStackId.getStackVersion()); + + component.setDesiredRepositoryVersion(repositoryVersion); + + 
service.addServiceComponent(component); + + ServiceComponent sc = service.getServiceComponent(componentName); + Assert.assertNotNull(sc); + + long serviceGroupId = 1; + long serviceId = 1; + + ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + + RepositoryVersionEntity repoVersion2201 = helper.getOrCreateRepositoryVersion( + component.getDesiredStackId(), "2.2.0.1"); + + RepositoryVersionEntity repoVersion2202 = helper.getOrCreateRepositoryVersion( + component.getDesiredStackId(), "2.2.0.2"); + + addHostToCluster("h1", clusterName); + addHostToCluster("h2", clusterName); + + sc.setDesiredState(State.INSTALLED); + Assert.assertEquals(State.INSTALLED, sc.getDesiredState()); + + ServiceComponentHost sch1 = sc.addServiceComponentHost("h1"); + ServiceComponentHost sch2 = sc.addServiceComponentHost("h2"); + + // !!! case 1: component desired is UNKNOWN, mix of h-c versions + sc.setDesiredRepositoryVersion(repositoryVersion); + sch1.setVersion("2.2.0.1"); + sch2.setVersion("2.2.0.2"); + sc.updateRepositoryState("2.2.0.2"); + entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState()); + + // !!! case 2: component desired is UNKNOWN, all h-c same version + sc.setDesiredRepositoryVersion(repositoryVersion); + sch1.setVersion("2.2.0.1"); + sch2.setVersion("2.2.0.1"); + sc.updateRepositoryState("2.2.0.1"); + entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState()); + + // !!! case 3: component desired is known, any component reports different version + sc.setDesiredRepositoryVersion(repoVersion2201); + sch1.setVersion("2.2.0.1"); + sch2.setVersion("2.2.0.2"); + sc.updateRepositoryState("2.2.0.2"); + entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState()); + + // !!! case 4: component desired is known, component reports same as desired, mix of h-c versions + sc.setDesiredRepositoryVersion(repoVersion2201); + sch1.setVersion("2.2.0.1"); + sch2.setVersion("2.2.0.2"); + sc.updateRepositoryState("2.2.0.1"); + entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState()); + + // !!! 
case 5: component desired is known, component reports same as desired, all h-c the same + sc.setDesiredRepositoryVersion(repoVersion2201); + sch1.setVersion("2.2.0.1"); + sch2.setVersion("2.2.0.1"); + sc.updateRepositoryState("2.2.0.1"); + entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceGroupId, serviceId, componentName); + assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState()); + } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java index 5b8f6fd2cb5..b0ee246b373 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java @@ -90,7 +90,7 @@ public void teardown() throws AmbariException, SQLException { @Test public void testCanBeRemoved() throws Exception{ - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", STACK_ID.getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0"); Service service = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); for (State state : State.values()) { @@ -100,7 +100,7 @@ public void testCanBeRemoved() throws Exception{ org.junit.Assert.assertTrue(service.canBeRemoved()); } - ServiceComponent component = service.addServiceComponent("NAMENODE", "NAMENODE"); + ServiceComponent component = service.addServiceComponent("NAMENODE"); // component can be removed component.setDesiredState(State.INSTALLED); @@ -179,11 +179,11 @@ public void testAddGetDeleteServiceComponents() throws AmbariException { Assert.assertTrue(s.getServiceComponents().isEmpty()); ServiceComponent sc1 = - serviceComponentFactory.createNew(s, "NAMENODE", "NAMENODE"); + serviceComponentFactory.createNew(s, "NAMENODE"); ServiceComponent sc2 = - serviceComponentFactory.createNew(s, "DATANODE1", "DATANODE1"); + serviceComponentFactory.createNew(s, "DATANODE1"); ServiceComponent sc3 = - serviceComponentFactory.createNew(s, "DATANODE2", "DATANODE2"); + serviceComponentFactory.createNew(s, "DATANODE2"); Map<String, ServiceComponent> comps = new HashMap<>(); @@ -205,7 +205,7 @@ public void testAddGetDeleteServiceComponents() throws AmbariException { s.addServiceComponent(sc3); - ServiceComponent sc4 = s.addServiceComponent("HDFS_CLIENT", "HDFS_CLIENT"); + ServiceComponent sc4 = s.addServiceComponent("HDFS_CLIENT"); Assert.assertNotNull(s.getServiceComponent(sc4.getName())); Assert.assertEquals(State.INIT, s.getServiceComponent("HDFS_CLIENT").getDesiredState()); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java index 2d21ab81b20..53b0a109b56 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java @@ -103,7 +103,6 @@ protected void setUp() throws Exception { List<ServiceComponentHost> serviceComponentHosts = new ArrayList<>(); ServiceComponentHost sch = EasyMock.createNiceMock(ServiceComponentHost.class); expect(sch.getServiceName()).andReturn("HDFS").anyTimes(); - expect(sch.getServiceType()).andReturn("HDFS").anyTimes(); expect(sch.getServiceComponentName()).andReturn("NAMENODE").anyTimes(); expect(sch.getHostName()).andReturn(HOSTNAME).anyTimes(); EasyMock.replay(sch); @@ -112,7 +111,6 @@ protected void setUp() throws 
Exception { // add HDFS/DN sch = EasyMock.createNiceMock(ServiceComponentHost.class); expect(sch.getServiceName()).andReturn("HDFS").anyTimes(); - expect(sch.getServiceType()).andReturn("HDFS").anyTimes(); expect(sch.getServiceComponentName()).andReturn("DATANODE").anyTimes(); expect(sch.getHostName()).andReturn(HOSTNAME).anyTimes(); EasyMock.replay(sch); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java index 1ab33859704..74201e0f9d7 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java @@ -298,7 +298,7 @@ private void installHdfsService() throws Exception { cluster.getCurrentStackVersion(), REPO_VERSION); String serviceName = "HDFS"; - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0"); serviceFactory.createNew(cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, repositoryVersion); Service service = cluster.getService(serviceName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java index 6b827c9a3cd..294f6d8dcac 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java @@ -181,7 +181,7 @@ public void testInitialAlertEvent() throws Exception { private void installHdfsService() throws Exception { String serviceName = "HDFS"; - ServiceGroup serviceGroup = m_cluster.addServiceGroup("CORE", STACK_ID.getStackId()); + ServiceGroup serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0"); m_serviceFactory.createNew(m_cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, m_repositoryVersion); Service service = m_cluster.getService(serviceName); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java index 06ecdc2b6db..9970b9c90ca 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java @@ -143,7 +143,7 @@ public void setup() throws Exception { clusters.mapHostToCluster(hostName, "c1"); } - serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); + serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0"); Service service = installService("HDFS", serviceGroup); addServiceComponent(service, "NAMENODE"); addServiceComponent(service, "DATANODE"); @@ -595,7 +595,7 @@ private ServiceComponent addServiceComponent(Service service, serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { serviceComponent = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java index c4d0a76e96d..7fbaf6f5404 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java @@ -237,20 +237,20 @@ public void testDeleteService() throws Exception { ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); - ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE", "NAMENODE"); + ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE"); nameNode.addServiceComponentHost(hostName1); - ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE", "DATANODE"); + ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE"); dataNode.addServiceComponentHost(hostName1); dataNode.addServiceComponentHost(hostName2); - ServiceComponent hdfsClient = hdfs.addServiceComponent("HDFS_CLIENT", "HDFS_CLIENT"); + ServiceComponent hdfsClient = hdfs.addServiceComponent("HDFS_CLIENT"); hdfsClient.addServiceComponentHost(hostName1); hdfsClient.addServiceComponentHost(hostName2); Service tez = cluster.addService(serviceGroup, serviceToDelete, serviceToDelete, repositoryVersion); - ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT", "TEZ_CLIENT"); + ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT"); ServiceComponentHost tezClientHost1 = tezClient.addServiceComponentHost(hostName1); ServiceComponentHost tezClientHost2 = tezClient.addServiceComponentHost(hostName2); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java index 4e85214825d..9a7deb6886d 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java @@ -343,22 +343,22 @@ private Cluster createClusterForRU(String clusterName, RepositoryVersionEntity r cluster.addService(s3); // Add HDFS components - ServiceComponent sc1CompA = serviceComponentFactory.createNew(s1, "NAMENODE", "NAMENODE"); - ServiceComponent sc1CompB = serviceComponentFactory.createNew(s1, "DATANODE", "DATANODE"); - ServiceComponent sc1CompC = serviceComponentFactory.createNew(s1, "HDFS_CLIENT", "HDFS_CLIENT"); + ServiceComponent sc1CompA = serviceComponentFactory.createNew(s1, "NAMENODE"); + ServiceComponent sc1CompB = serviceComponentFactory.createNew(s1, "DATANODE"); + ServiceComponent sc1CompC = serviceComponentFactory.createNew(s1, "HDFS_CLIENT"); s1.addServiceComponent(sc1CompA); s1.addServiceComponent(sc1CompB); s1.addServiceComponent(sc1CompC); // Add ZK - ServiceComponent sc2CompA = serviceComponentFactory.createNew(s2, "ZOOKEEPER_SERVER", "ZOOKEEPER_SERVER"); - ServiceComponent sc2CompB = serviceComponentFactory.createNew(s2, "ZOOKEEPER_CLIENT", "ZOOKEEPER_CLIENT"); + ServiceComponent sc2CompA = serviceComponentFactory.createNew(s2, "ZOOKEEPER_SERVER"); + ServiceComponent sc2CompB = serviceComponentFactory.createNew(s2, "ZOOKEEPER_CLIENT"); s2.addServiceComponent(sc2CompA); s2.addServiceComponent(sc2CompB); // Add Ganglia - ServiceComponent sc3CompA = serviceComponentFactory.createNew(s3, "GANGLIA_SERVER", "GANGLIA_SERVER"); - ServiceComponent 
sc3CompB = serviceComponentFactory.createNew(s3, "GANGLIA_MONITOR", "GANGLIA_MONITOR"); + ServiceComponent sc3CompA = serviceComponentFactory.createNew(s3, "GANGLIA_SERVER"); + ServiceComponent sc3CompB = serviceComponentFactory.createNew(s3, "GANGLIA_MONITOR"); s3.addServiceComponent(sc3CompA); s3.addServiceComponent(sc3CompB); @@ -633,7 +633,7 @@ public void testGetServiceComponentHosts() throws Exception { Service s = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "HDFS", "HDFS", repositoryVersion); c1.addService(s); - ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE", "NAMENODE"); + ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE"); s.addServiceComponent(sc); ServiceComponentHost sch = @@ -651,7 +651,7 @@ public void testGetServiceComponentHosts() throws Exception { iterator.next(); Service s1 = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "PIG", "PIG", repositoryVersion); c1.addService(s1); - ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG", "PIG"); + ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG"); s1.addServiceComponent(sc1); ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1"); sc1.addServiceComponentHost(sch1); @@ -673,12 +673,12 @@ public void testGetServiceComponentHosts_ForService() throws Exception { Service s = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "HDFS", "HDFS", repositoryVersion); c1.addService(s); - ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE"); s.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); scNN.addServiceComponentHost(schNNH1); - ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE"); s.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); @@ -703,12 +703,12 @@ public void testGetServiceComponentHosts_ForServiceComponent() throws Exception Service s = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "HDFS", "HDFS", repositoryVersion); c1.addService(s); - ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE"); s.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); scNN.addServiceComponentHost(schNNH1); - ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE"); s.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); @@ -739,12 +739,12 @@ public void testGetServiceComponentHostMap() throws Exception { Service s = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "HDFS", "HDFS", repositoryVersion); c1.addService(s); - ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE"); s.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); scNN.addServiceComponentHost(schNNH1); - ServiceComponent 
scDN = serviceComponentFactory.createNew(s, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(s, "DATANODE"); s.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); @@ -776,19 +776,19 @@ public void testGetServiceComponentHostMap_ForService() throws Exception { Service sfMR = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "MAPREDUCE", "MAPREDUCE", repositoryVersion); c1.addService(sfMR); - ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE"); sfHDFS.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); scNN.addServiceComponentHost(schNNH1); - ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE"); sfHDFS.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2"); scDN.addServiceComponentHost(scDNH2); - ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER", "JOBTRACKER"); + ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER"); sfMR.addServiceComponent(scJT); ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1"); scJT.addServiceComponentHost(schJTH1); @@ -834,19 +834,19 @@ public void testGetServiceComponentHostMap_ForHost() throws Exception { Service sfMR = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "MAPREDUCE", "MAPREDUCE", repositoryVersion); c1.addService(sfMR); - ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE"); sfHDFS.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); scNN.addServiceComponentHost(schNNH1); - ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE"); sfHDFS.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2"); scDN.addServiceComponentHost(scDNH2); - ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER", "JOBTRACKER"); + ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER"); sfMR.addServiceComponent(scJT); ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1"); scJT.addServiceComponentHost(schJTH1); @@ -893,19 +893,19 @@ public void testGetServiceComponentHostMap_ForHostAndService() throws Exception Service sfMR = serviceFactory.createNew(c1, serviceGroup, Collections.emptyList(), "MAPREDUCE", "MAPREDUCE", repositoryVersion); c1.addService(sfMR); - ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE", "NAMENODE"); + ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE"); sfHDFS.addServiceComponent(scNN); ServiceComponentHost schNNH1 = serviceComponentHostFactory.createNew(scNN, "h1"); 
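/* Illustrative aside, not a hunk from this patch: every -/+ pair in these test hunks follows one mechanical pattern, ServiceComponentFactory.createNew losing its duplicated component-type argument. Old call shape: serviceComponentFactory.createNew(s, "DATANODE", "DATANODE"); new call shape: serviceComponentFactory.createNew(s, "DATANODE"); the component name alone now identifies the component within its service. Receiver and literals here simply mirror the surrounding test code. */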
scNN.addServiceComponentHost(schNNH1); - ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE", "DATANODE"); + ServiceComponent scDN = serviceComponentFactory.createNew(sfHDFS, "DATANODE"); sfHDFS.addServiceComponent(scDN); ServiceComponentHost scDNH1 = serviceComponentHostFactory.createNew(scDN, "h1"); scDN.addServiceComponentHost(scDNH1); ServiceComponentHost scDNH2 = serviceComponentHostFactory.createNew(scDN, "h2"); scDN.addServiceComponentHost(scDNH2); - ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER", "JOBTRACKER"); + ServiceComponent scJT = serviceComponentFactory.createNew(sfMR, "JOBTRACKER"); sfMR.addServiceComponent(scJT); ServiceComponentHost schJTH1 = serviceComponentHostFactory.createNew(scJT, "h1"); scJT.addServiceComponentHost(schJTH1); @@ -1076,7 +1076,7 @@ public void testDeleteService() throws Exception { c1.addService(serviceGroup, "MAPREDUCE", "MAPREDUCE", repositoryVersion); Service hdfs = c1.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); - ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE", "NAMENODE"); + ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE"); assertEquals(2, c1.getServices().size()); assertEquals(2, injector.getProvider(EntityManager.class).get(). @@ -1541,8 +1541,8 @@ public void testTransitionHostsToInstalling() throws Exception { c1.addService(hdfs); // Add HDFS components - ServiceComponent datanode = serviceComponentFactory.createNew(hdfs, "NAMENODE", "NAMENODE"); - ServiceComponent namenode = serviceComponentFactory.createNew(hdfs, "DATANODE", "DATANODE"); + ServiceComponent datanode = serviceComponentFactory.createNew(hdfs, "NAMENODE"); + ServiceComponent namenode = serviceComponentFactory.createNew(hdfs, "DATANODE"); hdfs.addServiceComponent(datanode); hdfs.addServiceComponent(namenode); @@ -1874,12 +1874,12 @@ public void testTransitionNonReportableHost() throws Exception { ServiceGroup serviceGroup = c1.addServiceGroup("CORE", stackId.getStackId()); Service service = c1.addService(serviceGroup, "ZOOKEEPER", "ZOOKEEPER", repositoryVersion); - ServiceComponent sc = service.addServiceComponent("ZOOKEEPER_SERVER", "ZOOKEEPER_SERVER"); + ServiceComponent sc = service.addServiceComponent("ZOOKEEPER_SERVER"); sc.addServiceComponentHost("h-1"); sc.addServiceComponentHost("h-2"); service = c1.addService(serviceGroup, "SQOOP", "SQOOP", repositoryVersion); - sc = service.addServiceComponent("SQOOP", "SQOOP"); + sc = service.addServiceComponent("SQOOP"); sc.addServiceComponentHost("h-3"); HostEntity hostEntity = hostDAO.findByName("h-3"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java index 8988645eaaa..112166882ab 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java @@ -374,7 +374,7 @@ private ServiceComponent addServiceComponent(Service service, serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { serviceComponent = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java index dfb7ef16b26..849326b6dc2 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java @@ -427,15 +427,15 @@ public void testDeleteCluster() throws Exception { // host config override host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0"); Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion); //Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS")); - ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE", "NAMENODE"); - ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE", "DATANODE"); + ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE"); + ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE"); - ServiceComponent serviceCheckNode = hdfs.addServiceComponent("HDFS_CLIENT", "HDFS_CLIENT"); + ServiceComponent serviceCheckNode = hdfs.addServiceComponent("HDFS_CLIENT"); ServiceComponentHost nameNodeHost = nameNode.addServiceComponentHost(h1); HostEntity nameNodeHostEntity = hostDAO.findByName(nameNodeHost.getHostName()); @@ -448,9 +448,15 @@ public void testDeleteCluster() throws Exception { Assert.assertNotNull(injector.getInstance(HostComponentStateDAO.class).findByIndex( nameNodeHost.getClusterId(), 1L, 1L, - nameNodeHost.getServiceComponentId(), nameNodeHostEntity.getHostId())); - - Assert.assertNotNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex(nameNodeHost.getServiceComponentId())); + nameNodeHost.getServiceComponentName(), nameNodeHostEntity.getHostId())); + + Assert.assertNotNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex( + nameNodeHost.getClusterId(), + 1L, + 1L, + nameNodeHost.getServiceComponentName(), + nameNodeHostEntity.getHostId() + )); Assert.assertEquals(2, injector.getProvider(EntityManager.class).get().createQuery("SELECT config FROM ClusterConfigEntity config").getResultList().size()); Assert.assertEquals(1, injector.getProvider(EntityManager.class).get().createQuery("SELECT state FROM ClusterStateEntity state").getResultList().size()); Assert.assertEquals(1, injector.getProvider(EntityManager.class).get().createQuery("SELECT config FROM ClusterConfigEntity config WHERE config.selected = 1").getResultList().size()); @@ -486,9 +492,12 @@ public void testDeleteCluster() throws Exception { Assert.assertEquals(2, hostDAO.findAll().size()); Assert.assertNull(injector.getInstance(HostComponentStateDAO.class).findByIndex( nameNodeHost.getClusterId(), 1L, 1L, - nameNodeHost.getServiceComponentId(), nameNodeHostEntity.getHostId())); + nameNodeHost.getServiceComponentName(), nameNodeHostEntity.getHostId())); - Assert.assertNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex(nameNodeHost.getServiceComponentId())); + Assert.assertNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex( + nameNodeHost.getClusterId(), 1L, 1L, + nameNodeHost.getServiceComponentName(), nameNodeHostEntity.getHostId() + )); Assert.assertEquals(0, injector.getProvider(EntityManager.class).get().createQuery("SELECT config FROM ClusterConfigEntity config").getResultList().size()); Assert.assertEquals(0, 
injector.getProvider(EntityManager.class).get().createQuery("SELECT state FROM ClusterStateEntity state").getResultList().size()); Assert.assertEquals(0, topologyRequestDAO.findByClusterId(cluster.getClusterId()).size()); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java index c65b029dcb4..e3d76b323fe 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java @@ -243,7 +243,7 @@ private ServiceComponent addServiceComponent(Service service, serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { serviceComponent = serviceComponentFactory.createNew(service, - componentName, componentName); + componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java index b3afd65cb0d..8289a0db5b6 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java @@ -139,7 +139,7 @@ public void setup() throws Exception { clusters.mapHostToCluster(hostName, "c1"); Service service = installService("HDFS"); - addServiceComponent(service, "NAMENODE", "NAMENODE"); + addServiceComponent(service, "NAMENODE"); } @After @@ -151,8 +151,8 @@ public void teardown() throws AmbariException, SQLException { */ @Test() public void testConcurrentWriteDeadlock() throws Exception { - ServiceComponentHost nameNodeSCH = createNewServiceComponentHost("HDFS", "NAMENODE", "NAMENODE", "c6401"); - ServiceComponentHost dataNodeSCH = createNewServiceComponentHost("HDFS", "DATANODE", "DATANODE", "c6401"); + ServiceComponentHost nameNodeSCH = createNewServiceComponentHost("HDFS", "NAMENODE", "c6401"); + ServiceComponentHost dataNodeSCH = createNewServiceComponentHost("HDFS", "DATANODE", "c6401"); List<ServiceComponentHost> serviceComponentHosts = new ArrayList<>(); serviceComponentHosts.add(nameNodeSCH); @@ -229,10 +229,10 @@ private void setOsFamily(Host host, String osFamily, String osVersion) { } private ServiceComponentHost createNewServiceComponentHost(String svc, - String svcComponentName, String svcComponentType, String hostName) throws AmbariException { + String svcComponent, String hostName) throws AmbariException { Assert.assertNotNull(cluster.getConfigGroups()); Service s = installService(svc); - ServiceComponent sc = addServiceComponent(s, svcComponentName, svcComponentType); + ServiceComponent sc = addServiceComponent(s, svcComponent); ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName); @@ -258,13 +258,13 @@ private Service installService(String serviceName) throws AmbariException { } private ServiceComponent addServiceComponent(Service service, - String componentName, String svcComponentType) throws AmbariException { + String componentName) throws AmbariException { ServiceComponent serviceComponent = null; try { 
serviceComponent = service.getServiceComponent(componentName); } catch (ServiceComponentNotFoundException e) { serviceComponent = serviceComponentFactory.createNew(service, - componentName, svcComponentType); + componentName); service.addServiceComponent(serviceComponent); serviceComponent.setDesiredState(State.INSTALLED); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/repository/VersionDefinitionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/repository/VersionDefinitionTest.java new file mode 100644 index 00000000000..a293d3a518f --- /dev/null +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/repository/VersionDefinitionTest.java @@ -0,0 +1,611 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.state.repository; + +import static org.easymock.EasyMock.createNiceMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.lang.reflect.Field; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; +import org.apache.ambari.server.state.Cluster; +import org.apache.ambari.server.state.ComponentInfo; +import org.apache.ambari.server.state.RepositoryType; +import org.apache.ambari.server.state.Service; +import org.apache.ambari.server.state.ServiceInfo; +import org.apache.ambari.server.state.StackInfo; +import org.apache.ambari.server.state.stack.RepoTag; +import org.apache.ambari.server.state.stack.RepositoryXml; +import org.apache.ambari.server.state.stack.RepositoryXml.Os; +import org.apache.ambari.server.state.stack.RepositoryXml.Repo; +import org.apache.commons.io.FileUtils; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +/** + * Tests for repository definitions. 
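+ * <p>Covers loading a definition from a String, a URL, and a file; resolving manifest and + * available services against a {@code StackInfo}; merging two definitions; round-trip + * serialization; package-version and MAINT-type handling; cluster version summaries; and + * recursive patch-dependency calculation.</p>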
+ */ +public class VersionDefinitionTest { + + private static File file = new File("src/test/resources/version_definition_test.xml"); + + @Test + public void testLoadingString() throws Exception { + String xmlString = FileUtils.readFileToString(file); + VersionDefinitionXml xml = VersionDefinitionXml.load(xmlString); + + validateXml(xml); + } + + @Test + public void testLoadingUrl() throws Exception { + VersionDefinitionXml xml = VersionDefinitionXml.load(file.toURI().toURL()); + + validateXml(xml); + } + + private void validateXml(VersionDefinitionXml xml) throws Exception { + assertNotNull(xml.release); + assertEquals(RepositoryType.PATCH, xml.release.repositoryType); + assertEquals("HDP-2.3", xml.release.stackId); + assertEquals("2.3.4.1", xml.release.version); + assertEquals("2.3.4.[1-9]", xml.release.compatibleWith); + assertEquals("http://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.3.4/", xml.release.releaseNotes); + + assertEquals(4, xml.manifestServices.size()); + assertEquals("HDFS-271", xml.manifestServices.get(0).serviceId); + assertEquals("HDFS", xml.manifestServices.get(0).serviceName); + assertEquals("2.7.1", xml.manifestServices.get(0).version); + assertEquals("10", xml.manifestServices.get(0).versionId); + + assertEquals(3, xml.availableServices.size()); + assertEquals("HDFS-271", xml.availableServices.get(0).serviceIdReference); + assertEquals(0, xml.availableServices.get(0).components.size()); + + assertEquals("HIVE-110", xml.availableServices.get(2).serviceIdReference); + assertEquals(1, xml.availableServices.get(2).components.size()); + + assertNotNull(xml.repositoryInfo); + assertEquals(2, xml.repositoryInfo.getOses().size()); + + assertEquals("redhat6", xml.repositoryInfo.getOses().get(0).getFamily()); + assertEquals(2, xml.repositoryInfo.getOses().get(0).getRepos().size()); + assertEquals("http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0", + xml.repositoryInfo.getOses().get(0).getRepos().get(0).getBaseUrl()); + assertEquals("HDP-2.3", xml.repositoryInfo.getOses().get(0).getRepos().get(0).getRepoId()); + assertEquals("HDP", xml.repositoryInfo.getOses().get(0).getRepos().get(0).getRepoName()); + assertNull(xml.repositoryInfo.getOses().get(0).getPackageVersion()); + } + + @Test + public void testAllServices() throws Exception { + + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + + StackInfo stack = new StackInfo() { + @Override + public ServiceInfo getService(String name) { + return null; + } + }; + + // the file does not define available services + assertEquals(4, xml.manifestServices.size()); + assertEquals(3, xml.getAvailableServices(stack).size()); + } + + @Test + public void testStackManifest() throws Exception { + + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + + StackInfo stack = new StackInfo() { + private Map<String, ServiceInfo> m_services = new HashMap<String, ServiceInfo>() {{ + put("HDFS", makeService("HDFS")); + put("HBASE", makeService("HBASE")); + put("HIVE", makeService("HIVE")); + put("YARN", makeService("YARN")); + }}; + + @Override + public ServiceInfo getService(String name) { + return m_services.get(name); + } + + @Override + public synchronized Collection<ServiceInfo> getServices() { + return m_services.values(); + } + + }; + + List<ManifestServiceInfo> stackServices = xml.getStackServices(stack); + + // the file does not define available services + assertEquals(4, 
xml.manifestServices.size()); + assertEquals(3, xml.getAvailableServices(stack).size()); + assertEquals(4, stackServices.size()); + + boolean foundHdfs = false; + boolean foundYarn = false; + boolean foundHive = false; + + for (ManifestServiceInfo msi : stackServices) { + if ("HDFS".equals(msi.m_name)) { + foundHdfs = true; + assertEquals("HDFS Display", msi.m_display); + assertEquals("HDFS Comment", msi.m_comment); + assertEquals(1, msi.m_versions.size()); + assertEquals("2.7.1", msi.m_versions.iterator().next()); + } else if ("YARN".equals(msi.m_name)) { + foundYarn = true; + assertEquals(1, msi.m_versions.size()); + assertEquals("1.1.1", msi.m_versions.iterator().next()); + } else if ("HIVE".equals(msi.m_name)) { + foundHive = true; + assertEquals(2, msi.m_versions.size()); + assertTrue(msi.m_versions.contains("1.1.0")); + assertTrue(msi.m_versions.contains("2.0.0")); + } + } + + assertTrue(foundHdfs); + assertTrue(foundYarn); + assertTrue(foundHive); + } + + @Test + public void testSerialization() throws Exception { + + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + String xmlString = xml.toXml(); + xml = VersionDefinitionXml.load(xmlString); + + assertNotNull(xml.release.build); + assertEquals("1234", xml.release.build); + + f = new File("src/test/resources/version_definition_with_tags.xml"); + xml = VersionDefinitionXml.load(f.toURI().toURL()); + xmlString = xml.toXml(); + + xml = VersionDefinitionXml.load(xmlString); + + assertEquals(2, xml.repositoryInfo.getOses().size()); + List<Repo> repos = null; + for (Os os : xml.repositoryInfo.getOses()) { + if (os.getFamily().equals("redhat6")) { + repos = os.getRepos(); + } + } + assertNotNull(repos); + assertEquals(3, repos.size()); + + Repo found = null; + for (Repo repo : repos) { + if (repo.getRepoName().equals("HDP-GPL")) { + found = repo; + break; + } + } + + assertNotNull(found); + assertNotNull(found.getTags()); + assertEquals(1, found.getTags().size()); + assertEquals(RepoTag.GPL, found.getTags().iterator().next()); + } + + + @Test + public void testMerger() throws Exception { + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + + VersionDefinitionXml xml1 = VersionDefinitionXml.load(f.toURI().toURL()); + VersionDefinitionXml xml2 = VersionDefinitionXml.load(f.toURI().toURL()); + + assertEquals(2, xml1.repositoryInfo.getOses().size()); + assertEquals(2, xml2.repositoryInfo.getOses().size()); + + // make xml1 have only redhat6 (remove redhat7) without a package version + RepositoryXml.Os target = null; + for (RepositoryXml.Os os : xml1.repositoryInfo.getOses()) { + if (os.getFamily().equals("redhat7")) { + target = os; + } + } + assertNotNull(target); + xml1.repositoryInfo.getOses().remove(target); + + // make xml2 have only redhat7 (remove redhat6) with a package version + target = null; + for (RepositoryXml.Os os : xml2.repositoryInfo.getOses()) { + if (os.getFamily().equals("redhat6")) { + target = os; + } else { + Field field = RepositoryXml.Os.class.getDeclaredField("packageVersion"); + field.setAccessible(true); + field.set(os, "2_3_4_2"); + } + } + assertNotNull(target); + xml2.repositoryInfo.getOses().remove(target); + xml2.release.version = "2.3.4.2"; + xml2.release.build = "2468"; + + assertEquals(1, xml1.repositoryInfo.getOses().size()); + assertEquals(1, xml2.repositoryInfo.getOses().size()); + + VersionDefinitionXml.Merger builder = new VersionDefinitionXml.Merger(); + 
VersionDefinitionXml xml3 = builder.merge(); + + assertNull(xml3); + + builder.add(xml1.release.version, xml1); + builder.add("", xml2); + xml3 = builder.merge(); + + assertNotNull(xml3); + assertNull("Merged definition cannot have a build", xml3.release.build); + assertEquals(xml3.release.version, "2.3.4.1"); + + RepositoryXml.Os redhat6 = null; + RepositoryXml.Os redhat7 = null; + assertEquals(2, xml3.repositoryInfo.getOses().size()); + for (RepositoryXml.Os os : xml3.repositoryInfo.getOses()) { + if (os.getFamily().equals("redhat6")) { + redhat6 = os; + } else if (os.getFamily().equals("redhat7")) { + redhat7 = os; + } + } + assertNotNull(redhat6); + assertNotNull(redhat7); + assertNull(redhat6.getPackageVersion()); + assertEquals("2_3_4_2", redhat7.getPackageVersion()); + + // !!! extra test to make sure it serializes + xml3.toXml(); + } + + @Test + public void testLoadingBadNewLine() throws Exception { + List<String> lines = FileUtils.readLines(file); + + // crude + StringBuilder builder = new StringBuilder(); + for (Object line : lines) { + String lineString = line.toString().trim(); + // NOTE: the two XML element names below were lost from this patch text during + // extraction; <baseurl> and <version> are reconstructed stand-ins (unverified), + // chosen because validateXml asserts on exactly those values after loading. + if (lineString.startsWith("<baseurl>")) { + lineString = lineString.replace("<baseurl>", ""); + lineString = lineString.replace("</baseurl>", ""); + + builder.append("<baseurl>\n"); + builder.append(lineString).append('\n'); + builder.append("</baseurl>\n"); + } else if (lineString.startsWith("<version>")) { + lineString = lineString.replace("<version>", ""); + lineString = lineString.replace("</version>", ""); + + builder.append("<version>\n"); + builder.append(lineString).append('\n'); + builder.append("</version>\n"); + } else { + builder.append(line.toString().trim()).append('\n'); + } + } + + VersionDefinitionXml xml = VersionDefinitionXml.load(builder.toString()); + + validateXml(xml); + } + + @Test + public void testPackageVersion() throws Exception { + File f = new File("src/test/resources/hbase_version_test.xml"); + + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + + String xmlString = xml.toXml(); + + xml = VersionDefinitionXml.load(xmlString); + + assertNotNull(xml.release.build); + assertEquals("3396", xml.release.build); + assertEquals("redhat6", xml.repositoryInfo.getOses().get(0).getFamily()); + assertEquals("2_3_4_0_3396", xml.repositoryInfo.getOses().get(0).getPackageVersion()); + assertNotNull(xml.getPackageVersion("redhat6")); + assertEquals("2_3_4_0_3396", xml.getPackageVersion("redhat6")); + assertNull(xml.getPackageVersion("suse11")); + } + + @Test + public void testMaintVersion() throws Exception { + File f = new File("src/test/resources/version_definition_test_maint.xml"); + + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + + String xmlString = xml.toXml(); + + xml = VersionDefinitionXml.load(xmlString); + + assertEquals(RepositoryType.MAINT, xml.release.repositoryType); + assertEquals("2.3.4.1", xml.release.version); + assertEquals("1234", xml.release.build); + assertEquals("redhat6", xml.repositoryInfo.getOses().get(0).getFamily()); + + + + List<AvailableService> availableServices = xml.availableServices; + assertEquals(3, availableServices.size()); + + List<ManifestService> manifestServices = xml.manifestServices; + assertEquals(4, manifestServices.size()); + + ManifestService hdfs = null; + ManifestService hive = null; + for (ManifestService as : manifestServices) { + if (as.serviceId.equals("HDFS-271")) { + hdfs = as; + } else if (as.serviceId.equals("HIVE-200")) { + hive = as; + } + } + + assertNotNull(hdfs); + assertNotNull(hive); + + assertEquals("2.3.4.0", hdfs.releaseVersion); + assertNull(hive.releaseVersion); + + StackInfo stack = new StackInfo() { 
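+ // stack stub: every service lookup resolves to the HIVE ServiceInfo stub (with a + // HIVE_METASTORE component) built by makeService(String, String) further below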
+ @Override + public ServiceInfo getService(String name) { + return makeService("HIVE", "HIVE_METASTORE"); + } + }; + + Collection<AvailableService> availables = xml.getAvailableServices(stack); + + assertEquals(2, availables.size()); + + boolean found = false; + for (AvailableService available : availables) { + if (available.getName().equals("HIVE")) { + found = true; + assertEquals(2, available.getVersions().size()); + for (AvailableVersion version : available.getVersions()) { + if (version.getVersion().equals("1.1.0")) { + assertEquals("1.0.9", version.getReleaseVersion()); + } else { + assertNull(version.getReleaseVersion()); + } + } + } + } + + assertTrue("Found available version for HIVE", found); + } + + @Test + public void testAvailableFull() throws Exception { + + Cluster cluster = createNiceMock(Cluster.class); + RepositoryVersionEntity repositoryVersion = createNiceMock(RepositoryVersionEntity.class); + expect(repositoryVersion.getVersion()).andReturn("2.3.4.0").atLeastOnce(); + + Service serviceHdfs = createNiceMock(Service.class); + expect(serviceHdfs.getName()).andReturn("HDFS").atLeastOnce(); + expect(serviceHdfs.getDisplayName()).andReturn("HDFS").atLeastOnce(); + expect(serviceHdfs.getDesiredRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce(); + + Service serviceHBase = createNiceMock(Service.class); + expect(serviceHBase.getName()).andReturn("HBASE").atLeastOnce(); + expect(serviceHBase.getDisplayName()).andReturn("HBase").atLeastOnce(); + expect(serviceHBase.getDesiredRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce(); + + // !!! should never be accessed as it's not in any VDF + Service serviceAMS = createNiceMock(Service.class); + + expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder() + .put("HDFS", serviceHdfs) + .put("HBASE", serviceHBase) + .put("AMBARI_METRICS", serviceAMS).build()).atLeastOnce(); + + + replay(cluster, repositoryVersion, serviceHdfs, serviceHBase); + + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + ClusterVersionSummary summary = xml.getClusterSummary(cluster); + assertEquals(2, summary.getAvailableServiceNames().size()); + + f = new File("src/test/resources/version_definition_test_maint.xml"); + xml = VersionDefinitionXml.load(f.toURI().toURL()); + summary = xml.getClusterSummary(cluster); + assertEquals(0, summary.getAvailableServiceNames().size()); + + f = new File("src/test/resources/version_definition_test_maint.xml"); + xml = VersionDefinitionXml.load(f.toURI().toURL()); + xml.release.repositoryType = RepositoryType.STANDARD; + xml.availableServices = Collections.emptyList(); + summary = xml.getClusterSummary(cluster); + assertEquals(2, summary.getAvailableServiceNames().size()); + + f = new File("src/test/resources/version_definition_test_maint_partial.xml"); + xml = VersionDefinitionXml.load(f.toURI().toURL()); + summary = xml.getClusterSummary(cluster); + assertEquals(1, summary.getAvailableServiceNames().size()); + } + + @Test + public void testAvailableBuildVersion() throws Exception { + + Cluster cluster = createNiceMock(Cluster.class); + RepositoryVersionEntity repositoryVersion = createNiceMock(RepositoryVersionEntity.class); + expect(repositoryVersion.getVersion()).andReturn("2.3.4.1-1").atLeastOnce(); + + Service serviceHdfs = createNiceMock(Service.class); + expect(serviceHdfs.getName()).andReturn("HDFS").atLeastOnce(); + expect(serviceHdfs.getDisplayName()).andReturn("HDFS").atLeastOnce(); + 
expect(serviceHdfs.getDesiredRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce(); + + Service serviceHBase = createNiceMock(Service.class); + expect(serviceHBase.getName()).andReturn("HBASE").atLeastOnce(); + expect(serviceHBase.getDisplayName()).andReturn("HBase").atLeastOnce(); + expect(serviceHBase.getDesiredRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce(); + + // !!! should never be accessed as it's not in any VDF + Service serviceAMS = createNiceMock(Service.class); + + expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder() + .put("HDFS", serviceHdfs) + .put("HBASE", serviceHBase) + .put("AMBARI_METRICS", serviceAMS).build()).atLeastOnce(); + + replay(cluster, repositoryVersion, serviceHdfs, serviceHBase); + + File f = new File("src/test/resources/version_definition_test_maint_partial.xml"); + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + xml.release.version = "2.3.4.1"; + xml.release.build = "2"; + ClusterVersionSummary summary = xml.getClusterSummary(cluster); + assertEquals(1, summary.getAvailableServiceNames().size()); + } + + /** + * Tests that patch upgrade dependencies can be calculated recursively. + * + * @throws Exception + */ + @Test + public void testRecursiveDependencyDetection() throws Exception { + File f = new File("src/test/resources/version_definition_test_all_services.xml"); + VersionDefinitionXml xml = VersionDefinitionXml.load(f.toURI().toURL()); + + Map<String, List<String>> dependencies = new HashMap<>(); + dependencies.put("A", Lists.newArrayList("B", "X")); + dependencies.put("B", Lists.newArrayList("C", "D", "E")); + dependencies.put("E", Lists.newArrayList("A", "F")); + dependencies.put("F", Lists.newArrayList("B", "E")); + + // services not installed + dependencies.put("X", Lists.newArrayList("Y", "Z", "A")); + dependencies.put("Z", Lists.newArrayList("B")); + + Set<String> installedServices = Sets.newHashSet("A", "B", "C", "D", "E", "F", "G", "H"); + + Set<String> servicesInUpgrade = Sets.newHashSet("A"); + + Set<String> results = xml.getRecursiveDependencies(Sets.newHashSet("B"), dependencies, + servicesInUpgrade, installedServices); + + assertEquals(5, results.size()); + assertTrue(results.contains("B")); + assertTrue(results.contains("C")); + assertTrue(results.contains("D")); + assertTrue(results.contains("E")); + assertTrue(results.contains("F")); + + servicesInUpgrade = Sets.newHashSet("A", "B", "C", "E", "F"); + results = xml.getRecursiveDependencies(Sets.newHashSet("D"), dependencies, servicesInUpgrade, + installedServices); + + assertEquals(1, results.size()); + assertTrue(results.contains("D")); + + servicesInUpgrade = Sets.newHashSet("A", "F"); + results = xml.getRecursiveDependencies(Sets.newHashSet("B", "E"), dependencies, + servicesInUpgrade, + installedServices); + + assertEquals(4, results.size()); + assertTrue(results.contains("B")); + assertTrue(results.contains("C")); + assertTrue(results.contains("D")); + assertTrue(results.contains("E")); + } + + private static ServiceInfo makeService(final String name) { + return new ServiceInfo() { + @Override + public String getName() { + return name; + } + @Override + public String getDisplayName() { + return name + " Display"; + } + @Override + public String getVersion() { + return "1.1.1"; + } + @Override + public String getComment() { + return name + " Comment"; + } + + }; + } + + private static ServiceInfo makeService(final String name, final String component) { + return new ServiceInfo() { + @Override + public String getName() { + return name; + } + @Override + public String 
getDisplayName() { + return name + " Display"; + } + @Override + public String getVersion() { + return "1.1.1"; + } + @Override + public String getComment() { + return name + " Comment"; + } + + @Override + public ComponentInfo getComponentByName(String name) { + return null; + } + + }; + } + +} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java index 14033a0bbfd..44c58f095da 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java @@ -24,18 +24,14 @@ import java.io.File; import java.util.Collection; import java.util.List; -import java.util.Map; import java.util.Optional; import org.apache.ambari.server.stack.ModuleFileUnmarshaller; -import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig; import org.apache.ambari.server.state.stack.upgrade.Grouping; import org.apache.ambari.server.state.stack.upgrade.Lifecycle; -import org.apache.ambari.server.state.stack.upgrade.LifecycleType; +import org.apache.ambari.server.state.stack.upgrade.Lifecycle.LifecycleType; import org.apache.ambari.server.state.stack.upgrade.UpgradeType; -import org.junit.Assert; import org.junit.Test; -import org.springframework.util.CollectionUtils; /** * Tests for the upgrade pack @@ -73,26 +69,8 @@ public void testUpgradeParsing() throws Exception { assertFalse(startLifecycle.isPresent()); List<Grouping> groups = upgradeLifecycle.get().groups; - assertEquals(29, groups.size()); - assertEquals(LifecycleType.UPGRADE, groups.get(0).lifecycle); - - Optional<Grouping> optional = groups.stream().filter(g -> "Kafka".equals(g.title)).findFirst(); - assertTrue(optional.isPresent()); - Assert.assertNull(optional.get().name); - - assertEquals(12, upgradepack.getPrerequisiteChecks().size()); - PrerequisiteCheckConfig checkConfig = upgradepack.getPrerequisiteCheckConfig(); - Map<String, String> map = checkConfig.getCheckProperties("abc"); - assertTrue(CollectionUtils.isEmpty(map)); - - map = checkConfig.getCheckProperties("org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck"); - assertFalse(CollectionUtils.isEmpty(map)); - assertTrue(map.containsKey("min-failure-stack-version")); - assertTrue(map.containsKey("my-property")); - assertFalse(map.containsKey("random-key")); - assertEquals("HDP-2.3.0.0", map.get("min-failure-stack-version")); - assertEquals("my-value", map.get("my-property")); + assertEquals(29, groups.size()); } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java index 3d233ee29b0..7a17ccc5025 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java @@ -202,7 +202,7 @@ private ServiceComponentHost createNewServiceComponentHost( try { sc = s.getServiceComponent(svcComponent); } catch (ServiceComponentNotFoundException e) { - sc = serviceComponentFactory.createNew(s, svcComponent, svcComponent); + sc = serviceComponentFactory.createNew(s, svcComponent); s.addServiceComponent(sc); } @@ -1053,14 +1053,26 @@ public void testMaintenance() throws Exception { ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", 
"DATANODE", hostName, customServiceGroup); ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName, customServiceGroup); - HostComponentDesiredStateEntity entity = hostComponentDesiredStateDAO.findByIndex(sch1.getServiceComponentId()); + HostComponentDesiredStateEntity entity = hostComponentDesiredStateDAO.findByIndex( + cluster.getClusterId(), + customServiceGroup.getServiceGroupId(), + sch1.getServiceId(), + sch1.getServiceComponentName(), + hostEntity.getHostId() + ); Assert.assertEquals(MaintenanceState.OFF, entity.getMaintenanceState()); Assert.assertEquals(MaintenanceState.OFF, sch1.getMaintenanceState()); sch1.setMaintenanceState(MaintenanceState.ON); Assert.assertEquals(MaintenanceState.ON, sch1.getMaintenanceState()); - entity = hostComponentDesiredStateDAO.findByIndex(sch1.getServiceComponentId()); + entity = hostComponentDesiredStateDAO.findByIndex( + cluster.getClusterId(), + customServiceGroup.getServiceGroupId(), + sch1.getServiceId(), + sch1.getServiceComponentName(), + hostEntity.getHostId() + ); Assert.assertEquals(MaintenanceState.ON, entity.getMaintenanceState()); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/testing/DBInconsistencyTests.java b/ambari-server/src/test/java/org/apache/ambari/server/testing/DBInconsistencyTests.java index b2e2c6d80be..c78b0ee2e09 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/testing/DBInconsistencyTests.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/testing/DBInconsistencyTests.java @@ -91,7 +91,7 @@ public void testOrphanedSCHDesiredEntityReAdd() throws Exception { Assert.assertNotNull(clusterId); Cluster cluster = clusters.getCluster(OrmTestHelper.CLUSTER_NAME); - ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion()); + ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0"); Assert.assertNotNull(cluster); helper.addHost(clusters, cluster, "h1"); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java index db8c9100e06..895562d7756 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java @@ -18,7 +18,6 @@ package org.apache.ambari.server.topology; -import static java.util.Collections.emptySet; import static java.util.Collections.singletonList; import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.capture; @@ -36,6 +35,7 @@ import static org.junit.Assert.fail; import java.lang.reflect.Field; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -45,7 +45,6 @@ import java.util.Set; import java.util.stream.Stream; -import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.ClusterRequest; import org.apache.ambari.server.controller.ConfigGroupRequest; @@ -109,7 +108,6 @@ public class AmbariContextTest { private static final AmbariContext context = new AmbariContext(); private static final AmbariManagementController controller = createNiceMock(AmbariManagementController.class); - private static final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class); private static final ClusterController clusterController = 
createStrictMock(ClusterController.class); private static final HostResourceProvider hostResourceProvider = createStrictMock(HostResourceProvider.class); private static final ServiceGroupResourceProvider serviceGroupResourceProvider = createStrictMock(ServiceGroupResourceProvider.class); @@ -139,11 +137,10 @@ public class AmbariContextTest { private static final Collection group1Hosts = Arrays.asList(HOST1, HOST2); private Capture> configGroupRequestCapture = EasyMock.newCapture(); - private Setting setting = createNiceMock(Setting.class); @Before public void setUp() throws Exception { - reset(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, metaInfo, + reset(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters, cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory); @@ -242,21 +239,18 @@ public void setUp() throws Exception { expect(topology.getClusterId()).andReturn(CLUSTER_ID).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(topology.getBlueprintName()).andReturn(BP_NAME).anyTimes(); expect(topology.getHostGroupInfo()).andReturn(Collections.singletonMap(HOST_GROUP_1, group1Info)).anyTimes(); expect(blueprint.getName()).andReturn(BP_NAME).anyTimes(); - expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getStackIds()).andReturn(Collections.singleton(STACK_ID)).anyTimes(); - expect(topology.getServices()).andReturn(blueprintServices).anyTimes(); - expect(topology.getComponents()).andAnswer(() -> Stream.of( - ResolvedComponent.builder(new Component("s1Component1")).stackId(STACK_ID).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("s1Component2")).stackId(STACK_ID).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("s2Component1")).stackId(STACK_ID).serviceType("service2").buildPartial() - )).anyTimes(); - expect(topology.getConfiguration()).andReturn(bpConfiguration).anyTimes(); - expect(topology.getSetting()).andReturn(setting).anyTimes(); - expect(setting.getCredentialStoreEnabled("service1")).andReturn("true").anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(Collections.singleton(STACK_ID)).anyTimes(); + expect(blueprint.getServices()).andReturn(blueprintServices).anyTimes(); + expect(blueprint.getComponentNames("service1")).andReturn(Arrays.asList("s1Component1", "s1Component2")).anyTimes(); + expect(blueprint.getComponentNames("service2")).andReturn(Collections.singleton("s2Component1")).anyTimes(); + expect(blueprint.getStackIdsForService("service1")).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStackIdsForService("service2")).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getConfiguration()).andReturn(bpConfiguration).anyTimes(); + expect(blueprint.getCredentialStoreEnabled("service1")).andReturn("true").anyTimes(); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); @@ -267,8 +261,6 @@ public void setUp() throws Exception { expect(controller.getClusters()).andReturn(clusters).anyTimes(); expect(controller.getConfigHelper()).andReturn(configHelper).anyTimes(); - 
expect(controller.getAmbariMetaInfo()).andReturn(metaInfo).anyTimes(); - expect(metaInfo.getClusterProperties()).andReturn(emptySet()).anyTimes(); expect(clusters.getCluster(CLUSTER_NAME)).andReturn(cluster).anyTimes(); expect(clusters.getClusterById(CLUSTER_ID)).andReturn(cluster).anyTimes(); @@ -294,14 +286,14 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { - verify(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, metaInfo, - hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, setting, stack, clusters, + verify(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, + hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters, cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory); } private void replayAll() { - replay(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, metaInfo, - hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, setting, stack, clusters, + replay(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider, + hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters, cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory); } @@ -344,7 +336,7 @@ public void testCreateAmbariResources() throws Exception { assertEquals(String.format("%s-%s", STACK_NAME, STACK_VERSION), clusterRequest.getStackVersion()); Set<ServiceGroupRequest> serviceGroupRequests = serviceGroupRequestCapture.getValue(); - Set<ServiceGroupRequest> expectedServiceGroupRequests = Collections.singleton(new ServiceGroupRequest(cluster.getClusterName(), STACK_NAME, clusterRequest.getStackVersion())); + Set<ServiceGroupRequest> expectedServiceGroupRequests = Collections.singleton(new ServiceGroupRequest(cluster.getClusterName(), AmbariContext.DEFAULT_SERVICE_GROUP_NAME, clusterRequest.getStackVersion())); assertEquals(expectedServiceGroupRequests, serviceGroupRequests); Collection<ServiceRequest> serviceRequests = serviceRequestCapture.getValue(); @@ -406,6 +398,8 @@ public void testCreateAmbariHostResources() throws Exception { hostResourceProvider.createHosts(anyObject(Request.class)); expectLastCall().once(); + expect(cluster.getService("service1")).andReturn(mockService1).times(2); + expect(cluster.getService("service2")).andReturn(mockService1).once(); Capture<Set<ServiceComponentHostRequest>> requestsCapture = EasyMock.newCapture(); expect(controller.createHostComponents(capture(requestsCapture))).andReturn(null).once(); @@ -413,16 +407,49 @@ replayAll(); // test - Stream<ResolvedComponent> components = Stream.of( - ResolvedComponent.builder(new Component("component1", "mpack", "service1", null)).buildPartial(), - ResolvedComponent.builder(new Component("component2", "mpack", "service1", null)).buildPartial(), - ResolvedComponent.builder(new Component("component3", "mpack", "service2", null)).buildPartial() - ); - context.createAmbariHostResources(CLUSTER_ID, "host1", components); + Map<String, Collection<String>> componentsMap = new HashMap<>(); + Collection<String> components = new ArrayList<>(); + components.add("component1"); + components.add("component2"); + componentsMap.put("service1", components); + components = new ArrayList<>(); +
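// the second service's components go into their own list under "service2" +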
components.add("component3"); + componentsMap.put("service2", components); + + context.createAmbariHostResources(CLUSTER_ID, "host1", componentsMap); assertEquals(requestsCapture.getValue().size(), 3); } + @Test + public void testCreateAmbariHostResourcesWithMissingService() throws Exception { + // expectations + expect(cluster.getServices()).andReturn(clusterServices).anyTimes(); + + hostResourceProvider.createHosts(anyObject(Request.class)); + expectLastCall().once(); + expect(cluster.getService("service1")).andReturn(mockService1).times(2); + Capture> requestsCapture = EasyMock.newCapture(); + + expect(controller.createHostComponents(capture(requestsCapture))).andReturn(null).once(); + + replayAll(); + + // test + Map> componentsMap = new HashMap<>(); + Collection components = new ArrayList<>(); + components.add("component1"); + components.add("component2"); + componentsMap.put("service1", components); + components = new ArrayList<>(); + components.add("component3"); + componentsMap.put("service2", components); + + context.createAmbariHostResources(CLUSTER_ID, "host1", componentsMap); + + assertEquals(requestsCapture.getValue().size(), 2); + } + @Test public void testRegisterHostWithConfigGroup_createNewConfigGroup() throws Exception { // test specific expectations diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequestTest.java deleted file mode 100644 index e0f6549ccb7..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintBasedClusterProvisionRequestTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.topology; - -import static org.easymock.EasyMock.createNiceMock; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; - -import java.util.Set; - -import org.apache.ambari.server.controller.internal.ProvisionClusterRequest; -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.state.SecurityType; -import org.apache.ambari.server.state.StackId; -import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - -public class BlueprintBasedClusterProvisionRequestTest { - - private static final StackId STACK_ID = new StackId("HDP-2.6"); - private static final Set STACK_IDS = ImmutableSet.of(STACK_ID); - - @Test(expected = IllegalArgumentException.class) // THEN - public void clusterCannotRelaxBlueprintSecurity() { - // GIVEN - AmbariContext context = createNiceMock(AmbariContext.class); - StackDefinition stack = createNiceMock(StackDefinition.class); - expect(context.composeStacks(STACK_IDS)).andReturn(stack).anyTimes(); - - Blueprint blueprint = secureBlueprint(STACK_IDS); - ProvisionClusterRequest request = insecureCluster(); - - replay(context, stack, blueprint, request); - - // WHEN - new BlueprintBasedClusterProvisionRequest(context, null, blueprint, request); - } - - private ProvisionClusterRequest insecureCluster() { - ProvisionClusterRequest request = createNiceMock(ProvisionClusterRequest.class); - expect(request.getSecurityConfiguration()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(request.getStackIds()).andReturn(ImmutableSet.of()).anyTimes(); - expect(request.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - return request; - } - - private Blueprint secureBlueprint(Set stackIds) { - Blueprint blueprint = createNiceMock(Blueprint.class); - SecurityConfiguration secure = new SecurityConfiguration(SecurityType.KERBEROS); - expect(blueprint.getSecurity()).andReturn(secure).anyTimes(); - expect(blueprint.getStackIds()).andReturn(stackIds).anyTimes(); - expect(blueprint.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - return blueprint; - } - -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintFactoryTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintFactoryTest.java index ff163d37671..722345fc0fa 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintFactoryTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintFactoryTest.java @@ -19,54 +19,70 @@ package org.apache.ambari.server.topology; import static java.util.stream.Collectors.toSet; +import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.anyString; import static org.easymock.EasyMock.createNiceMock; +import static org.easymock.EasyMock.eq; import static org.easymock.EasyMock.expect; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.powermock.api.easymock.PowerMock.createStrictMock; +import static org.powermock.api.easymock.PowerMock.expectNew; import static org.powermock.api.easymock.PowerMock.replay; import static org.powermock.api.easymock.PowerMock.reset; import static org.powermock.api.easymock.PowerMock.verify; +import java.lang.reflect.Field; import java.util.Collection; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; +import 
org.apache.ambari.server.ObjectNotFoundException; +import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.controller.internal.BlueprintResourceProvider; import org.apache.ambari.server.controller.internal.BlueprintResourceProviderTest; import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.orm.dao.BlueprintDAO; import org.apache.ambari.server.orm.entities.BlueprintConfigEntity; import org.apache.ambari.server.orm.entities.BlueprintEntity; +import org.apache.ambari.server.stack.NoSuchStackException; import org.apache.ambari.server.state.StackId; +import org.easymock.EasyMockSupport; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import com.google.inject.util.Providers; /** * BlueprintFactory unit tests. */ +@SuppressWarnings("unchecked") +@RunWith(PowerMockRunner.class) +@PrepareForTest(BlueprintImpl.class) public class BlueprintFactoryTest { private static final String BLUEPRINT_NAME = "test-blueprint"; - private Stack stack = createNiceMock(Stack.class); - private BlueprintDAO dao = createStrictMock(BlueprintDAO.class); - private BlueprintEntity entity = createStrictMock(BlueprintEntity.class); - private BlueprintConfigEntity configEntity = createStrictMock(BlueprintConfigEntity.class); - private BlueprintFactory testFactory = new BlueprintFactory(Providers.of(dao)); + BlueprintFactory factory = new BlueprintFactory(); + Stack stack = createNiceMock(Stack.class); + BlueprintFactory testFactory = new TestBlueprintFactory(stack); + BlueprintDAO dao = createStrictMock(BlueprintDAO.class); + BlueprintEntity entity = createStrictMock(BlueprintEntity.class); + BlueprintConfigEntity configEntity = createStrictMock(BlueprintConfigEntity.class); + @Before public void init() throws Exception { + setPrivateField(factory, "blueprintDAO", dao); + Set<StackId> stackIds = ImmutableSet.of(new StackId("stack", "0.1")); Collection<String> services = ImmutableSet.of("test-service1", "test-service2"); Collection<String> components = ImmutableSet.of("component1", "component2"); @@ -86,6 +102,24 @@ public void tearDown() { reset(stack, dao, entity, configEntity); } + //todo: implement +// @Test +// public void testGetBlueprint() throws Exception { +// +// Collection<BlueprintConfigEntity> configs = new ArrayList<>(); +// configs.add(configEntity); +// +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(entity).once(); +// expect(entity.getBlueprintName()).andReturn(BLUEPRINT_NAME).atLeastOnce(); +// expect(entity.getConfigurations()).andReturn(configs).atLeastOnce(); +// +// replay(dao, entity); +// +// Blueprint blueprint = factory.getBlueprint(BLUEPRINT_NAME); +// +// +// } + @Test public void testGetMultiInstanceBlueprint() throws Exception { // prepare @@ -101,23 +135,22 @@ public void testGetMultiInstanceBlueprint() throws Exception { Stack edwStack = createNiceMock(Stack.class); expect(edwStack.getName()).andReturn(edw.getStackName()).anyTimes(); expect(edwStack.getVersion()).andReturn(edw.getStackVersion()).anyTimes(); - replay(hdpStack, edwStack, dao); + expectNew(Stack.class, eq(hdp.getStackName()), eq(hdp.getStackVersion()), anyObject(AmbariManagementController.class)).andReturn(hdpStack).anyTimes(); + expectNew(Stack.class, eq(edw.getStackName()), eq(edw.getStackVersion()),
anyObject(AmbariManagementController.class)).andReturn(edwStack).anyTimes(); + replay(Stack.class, hdpStack, edwStack, dao); // test Blueprint blueprint = testFactory.getBlueprint(BLUEPRINT_NAME); Set<String> mpackNames = blueprint.getMpacks().stream().map(MpackInstance::getMpackName).collect(Collectors.toSet()); assertEquals(ImmutableSet.of("HDPCORE-3.0", "EDW-3.1"), mpackNames); - Optional<MpackInstance> hdpCore = blueprint.getMpacks().stream() - .filter(mp -> "HDPCORE-3.0".equals(mp.getMpackName())) - .findAny(); - assertTrue(hdpCore.isPresent()); - MpackInstance mpackInstance = hdpCore.get(); + MpackInstance hdpCore = + blueprint.getMpacks().stream().filter(mp -> "HDPCORE-3.0".equals(mp.getMpackName())).findAny().get(); Set<String> serviceInstanceNames = - mpackInstance.getServiceInstances().stream().map(ServiceInstance::getName).collect(toSet()); + hdpCore.getServiceInstances().stream().map(ServiceInstance::getName).collect(toSet()); assertEquals(ImmutableSet.of("ZK1", "ZK2"), serviceInstanceNames); Set<String> serviceInstanceTypes = - mpackInstance.getServiceInstances().stream().map(ServiceInstance::getType).collect(toSet()); + hdpCore.getServiceInstances().stream().map(ServiceInstance::getType).collect(toSet()); assertEquals(ImmutableSet.of("ZOOKEEPER"), serviceInstanceTypes); Set<StackId> stackIds = blueprint.getStackIds(); assertEquals(ImmutableSet.of(hdp, edw), stackIds); @@ -129,7 +162,6 @@ public void testGetBlueprint_NotFound() throws Exception { expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null).once(); replay(dao, entity, configEntity); - BlueprintFactory factory = new BlueprintFactory(Providers.of(dao)); assertNull(factory.getBlueprint(BLUEPRINT_NAME)); } @@ -151,6 +183,11 @@ public void testCreateBlueprint() throws Exception { assertEquals(2, components.size()); assertTrue(components.contains("component1")); assertTrue(components.contains("component2")); + Collection<String> services = group1.getServices(); + assertEquals(2, services.size()); + assertTrue(services.contains("test-service1")); + assertTrue(services.contains("test-service2")); + assertTrue(group1.containsMasterComponent()); //todo: add configurations/attributes to properties Configuration configuration = group1.getConfiguration(); assertTrue(configuration.getProperties().isEmpty()); @@ -162,6 +199,10 @@ components = group2.getComponentNames(); assertEquals(1, components.size()); assertTrue(components.contains("component1")); + services = group2.getServices(); + assertEquals(1, services.size()); + assertTrue(services.contains("test-service1")); + assertTrue(group2.containsMasterComponent()); //todo: add configurations/attributes to properties //todo: test both v1 and v2 config syntax configuration = group2.getConfiguration(); @@ -210,6 +251,24 @@ public Blueprint createMultiInstanceBlueprint() throws Exception { return blueprint; } + @Test(expected=NoSuchStackException.class) + public void testCreateInvalidStack() throws Exception { + EasyMockSupport mockSupport = new EasyMockSupport(); + BlueprintFactory.StackFactory mockStackFactory = + mockSupport.createMock(BlueprintFactory.StackFactory.class); + + // setup mock to throw exception, to simulate invalid stack request + expect(mockStackFactory.createStack(new StackId(), null)).andThrow(new ObjectNotFoundException("Invalid Stack")); + + mockSupport.replayAll(); + + BlueprintFactory factoryUnderTest = + new BlueprintFactory(mockStackFactory); + factoryUnderTest.createStack(new StackId()); + + mockSupport.verifyAll(); + } +
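A note on the expectNew pattern this hunk reintroduces: PowerMock can only intercept a constructor call when the class that performs the `new` (here BlueprintImpl, which instantiates Stack) is listed in @PrepareForTest; preparing Stack itself would have no effect. A minimal, self-contained sketch of the pattern, assuming the same EasyMock/PowerMock APIs used above (the test class name and the concrete name/version literals are illustrative only, not part of this patch):

import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.eq;
import static org.powermock.api.easymock.PowerMock.expectNew;
import static org.powermock.api.easymock.PowerMock.replay;
import static org.powermock.api.easymock.PowerMock.verify;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

@RunWith(PowerMockRunner.class)
@PrepareForTest(BlueprintImpl.class)   // prepare the CALLER of `new Stack(...)`, not Stack itself
public class ExpectNewSketchTest {
  @Test
  public void constructorCallsAreIntercepted() throws Exception {
    Stack stackMock = createNiceMock(Stack.class);
    // From here on, any `new Stack("HDP", "3.0", controller)` executed inside
    // BlueprintImpl returns stackMock instead of constructing a real Stack.
    expectNew(Stack.class, eq("HDP"), eq("3.0"), anyObject(AmbariManagementController.class))
        .andReturn(stackMock).anyTimes();
    replay(Stack.class, stackMock);
    // ... exercise code under test that constructs a Stack ...
    verify(Stack.class, stackMock);
  }
}

If the @PrepareForTest entry is missing or names the wrong class, the real constructor runs and the expectNew expectation is never matched, which typically surfaces as a verification failure rather than an obvious error.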
@Test(expected=IllegalArgumentException.class) public void testCreate_NoBlueprintName() throws Exception { Map props = BlueprintResourceProviderTest.getBlueprintTestProperties().iterator().next(); @@ -250,6 +309,17 @@ public void testCreate_HostGroupWithNoComponents() throws Exception { testFactory.createBlueprint(props, null); } + @Test(expected=IllegalArgumentException.class) + public void testCreate_HostGroupWithInvalidComponent() throws Exception { + Map props = BlueprintResourceProviderTest.getBlueprintTestProperties().iterator().next(); + // change a component name to an invalid name + ((Set>) ((Set>) props.get(BlueprintResourceProvider.HOST_GROUP_PROPERTY_ID)). + iterator().next().get(BlueprintResourceProvider.COMPONENT_PROPERTY_ID)).iterator().next().put("name", "INVALID_COMPONENT"); + + replay(stack, dao, entity, configEntity); + testFactory.createBlueprint(props, null); + } + @Test(expected = IllegalArgumentException.class) // THEN public void verifyDefinitionsDisjointShouldRejectDuplication() { // GIVEN @@ -291,4 +361,24 @@ public void verifyStackDefinitionsAreDisjointShouldAllowDisjointStacks() { // no exception expected } + private class TestBlueprintFactory extends BlueprintFactory { + private Stack stack; + + public TestBlueprintFactory(Stack stack) { + this.stack = stack; + } + + @Override + protected Stack createStack(StackId stackId) throws NoSuchStackException { + return stack; + } + } + + + private void setPrivateField(Object o, String field, Object value) throws Exception { + Class c = o.getClass(); + Field f = c.getDeclaredField(field); + f.setAccessible(true); + f.set(o, value); + } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java index 19827e70432..362f4e4889f 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java @@ -23,11 +23,14 @@ import static org.easymock.EasyMock.createNiceMock; import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.reset; import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -37,7 +40,6 @@ import org.apache.ambari.server.orm.entities.BlueprintEntity; import org.apache.ambari.server.state.SecurityType; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; @@ -46,9 +48,11 @@ */ public class BlueprintImplTest { private static final Map>> EMPTY_ATTRIBUTES = new HashMap<>(); - private static final Configuration EMPTY_CONFIGURATION = Configuration.createEmpty(); + private static final Map> EMPTY_PROPERTIES = new HashMap<>(); + private static final Configuration EMPTY_CONFIGURATION = new Configuration(EMPTY_PROPERTIES, EMPTY_ATTRIBUTES); Stack stack = createNiceMock(Stack.class); + Setting setting = createNiceMock(Setting.class); HostGroup group1 = createMock(HostGroup.class); HostGroup group2 = createMock(HostGroup.class); Set hostGroups = new HashSet<>(); @@ -58,9 +62,10 @@ public class BlueprintImplTest { Map hdfsProps = new HashMap<>(); Configuration configuration = new Configuration(properties, EMPTY_ATTRIBUTES, EMPTY_CONFIGURATION); private final 
org.apache.ambari.server.configuration.Configuration serverConfig = createNiceMock(org.apache.ambari.server.configuration.Configuration.class); + private final BlueprintValidator blueprintValidator = new BlueprintValidatorImpl(serverConfig); @Before - public void setup() { + public void setup() throws NoSuchFieldException, IllegalAccessException { properties.put("hdfs-site", hdfsProps); hdfsProps.put("foo", "val"); hdfsProps.put("bar", "val"); @@ -101,11 +106,12 @@ public void setup() { requiredService2Properties.add(new Stack.ConfigProperty("category2", "prop2", null)); expect(stack.getRequiredConfigurationProperties("HDFS")).andReturn(requiredHDFSProperties).anyTimes(); expect(stack.getRequiredConfigurationProperties("SERVICE2")).andReturn(requiredService2Properties).anyTimes(); + + setupConfigurationWithGPLLicense(true); } @Test public void testValidateConfigurations__basic_positive() throws Exception { - // GIVEN expect(group1.getCardinality()).andReturn("1").atLeastOnce(); expect(group1.getComponents()).andReturn(Arrays.asList(new Component("c1"), new Component("c2"))).atLeastOnce(); expect(group2.getCardinality()).andReturn("1").atLeastOnce(); @@ -119,12 +125,10 @@ public void testValidateConfigurations__basic_positive() throws Exception { category2Props.put("prop2", "val"); SecurityConfiguration securityConfiguration = new SecurityConfiguration(SecurityType.KERBEROS, "testRef", null); - - // WHEN - Blueprint blueprint = new BlueprintImpl("test", hostGroups, emptySet(), emptySet(), configuration, securityConfiguration, null); + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, securityConfiguration, null); + blueprintValidator.validateRequiredProperties(blueprint); BlueprintEntity entity = blueprint.toEntity(); - // THEN verify(stack, group1, group2, serverConfig); assertTrue(entity.getSecurityType() == SecurityType.KERBEROS); assertTrue(entity.getSecurityDescriptorReference().equals("testRef")); @@ -132,7 +136,6 @@ public void testValidateConfigurations__basic_positive() throws Exception { @Test public void testValidateConfigurations__hostGroupConfig() throws Exception { - // GIVEN Map> group2Props = new HashMap<>(); Map group2Category2Props = new HashMap<>(); group2Props.put("category2", group2Category2Props); @@ -160,18 +163,13 @@ public void testValidateConfigurations__hostGroupConfig() throws Exception { hadoopProps.put("dfs_ha_initial_namenode_active", "%HOSTGROUP:group1%"); hadoopProps.put("dfs_ha_initial_namenode_standby", "%HOSTGROUP:group2%"); replay(stack, group1, group2, serverConfig); - - // WHEN - Blueprint blueprint = new BlueprintImpl("test", hostGroups, emptySet(), emptySet(), configuration, null, null); + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); BlueprintEntity entity = blueprint.toEntity(); - - // THEN verify(stack, group1, group2, serverConfig); assertTrue(entity.getSecurityType() == SecurityType.NONE); assertTrue(entity.getSecurityDescriptorReference() == null); } - - @Ignore // move out NAMENODE-specific test @Test public void testValidateConfigurations__hostGroupConfigForNameNodeHAPositive() throws Exception { Map> group2Props = new HashMap<>(); @@ -205,18 +203,16 @@ public void testValidateConfigurations__hostGroupConfigForNameNodeHAPositive() t hadoopProps.put("dfs_ha_initial_namenode_standby", "%HOSTGROUP::group2%"); replay(stack, group1, group2, serverConfig); - // 
WHEN - Blueprint blueprint = new BlueprintImpl("test", hostGroups, emptySet(), emptySet(), configuration, null, null); + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); BlueprintEntity entity = blueprint.toEntity(); - // THEN verify(stack, group1, group2, serverConfig); assertTrue(entity.getSecurityType() == SecurityType.NONE); assertTrue(entity.getSecurityDescriptorReference() == null); } - @Ignore // move out NAMENODE-specific test - @Test(expected = IllegalArgumentException.class) + @Test(expected= IllegalArgumentException.class) public void testValidateConfigurations__hostGroupConfigForNameNodeHAInCorrectHostGroups() throws Exception { Map> group2Props = new HashMap<>(); Map group2Category2Props = new HashMap<>(); @@ -248,13 +244,11 @@ public void testValidateConfigurations__hostGroupConfigForNameNodeHAInCorrectHos hadoopProps.put("dfs_ha_initial_namenode_active", "%HOSTGROUP::group2%"); hadoopProps.put("dfs_ha_initial_namenode_standby", "%HOSTGROUP::group3%"); replay(stack, group1, group2, serverConfig); - - // WHEN - new BlueprintImpl("test", hostGroups, emptySet(), emptySet(), configuration, null, null); + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); } - - @Ignore // move out NAMENODE-specific test - @Test(expected = IllegalArgumentException.class) + @Test(expected= IllegalArgumentException.class) public void testValidateConfigurations__hostGroupConfigForNameNodeHAMappedSameHostGroup() throws Exception { Map> group2Props = new HashMap<>(); Map group2Category2Props = new HashMap<>(); @@ -286,9 +280,695 @@ public void testValidateConfigurations__hostGroupConfigForNameNodeHAMappedSameHo hadoopProps.put("dfs_ha_initial_namenode_active", "%HOSTGROUP::group2%"); hadoopProps.put("dfs_ha_initial_namenode_standby", "%HOSTGROUP::group2%"); replay(stack, group1, group2, serverConfig); + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); + } + @Test(expected = InvalidTopologyException.class) + public void testValidateConfigurations__secretReference() throws InvalidTopologyException, + GPLLicenseNotAcceptedException, NoSuchFieldException, IllegalAccessException { + Map> group2Props = new HashMap<>(); + Map group2Category2Props = new HashMap<>(); - // WHEN - new BlueprintImpl("test", hostGroups, emptySet(), emptySet(), configuration, null, null); + group2Props.put("category2", group2Category2Props); + group2Category2Props.put("prop2", "val"); + hdfsProps.put("secret", "SECRET:hdfs-site:1:test"); + replay(stack, group1, group2, serverConfig); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); + } + + @Test(expected = GPLLicenseNotAcceptedException.class) + public void testValidateConfigurations__gplIsNotAllowedCodecsProperty() throws InvalidTopologyException, + GPLLicenseNotAcceptedException, NoSuchFieldException, IllegalAccessException { + Map> lzoProperties = new HashMap<>(); + lzoProperties.put("core-site", new HashMap(){{ + 
put(BlueprintValidatorImpl.CODEC_CLASSES_PROPERTY_NAME, "OtherCodec, " + BlueprintValidatorImpl.LZO_CODEC_CLASS); + }}); + Configuration lzoUsageConfiguration = new Configuration(lzoProperties, EMPTY_ATTRIBUTES, EMPTY_CONFIGURATION); + + setupConfigurationWithGPLLicense(false); + replay(stack, group1, group2, serverConfig); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), lzoUsageConfiguration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); } + @Test(expected = GPLLicenseNotAcceptedException.class) + public void testValidateConfigurations__gplIsNotAllowedLZOProperty() throws InvalidTopologyException, + GPLLicenseNotAcceptedException, NoSuchFieldException, IllegalAccessException { + Map> lzoProperties = new HashMap<>(); + lzoProperties.put("core-site", new HashMap(){{ + put(BlueprintValidatorImpl.LZO_CODEC_CLASS_PROPERTY_NAME, BlueprintValidatorImpl.LZO_CODEC_CLASS); + }}); + Configuration lzoUsageConfiguration = new Configuration(lzoProperties, EMPTY_ATTRIBUTES, EMPTY_CONFIGURATION); + + setupConfigurationWithGPLLicense(false); + replay(stack, group1, group2, serverConfig); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), lzoUsageConfiguration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); + } + + @Test + public void testValidateConfigurations__gplISAllowed() throws InvalidTopologyException, + GPLLicenseNotAcceptedException, NoSuchFieldException, IllegalAccessException { + Map> lzoProperties = new HashMap<>(); + lzoProperties.put("core-site", new HashMap(){{ + put(BlueprintValidatorImpl.LZO_CODEC_CLASS_PROPERTY_NAME, BlueprintValidatorImpl.LZO_CODEC_CLASS); + put(BlueprintValidatorImpl.CODEC_CLASSES_PROPERTY_NAME, "OtherCodec, " + BlueprintValidatorImpl.LZO_CODEC_CLASS); + }}); + Configuration lzoUsageConfiguration = new Configuration(lzoProperties, EMPTY_ATTRIBUTES, EMPTY_CONFIGURATION); + + expect(group2.getConfiguration()).andReturn(EMPTY_CONFIGURATION).atLeastOnce(); + replay(stack, group1, group2, serverConfig); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), lzoUsageConfiguration, null, null); + blueprintValidator.validateRequiredProperties(blueprint); + verify(stack, group1, group2, serverConfig); + } + + @Test + public void testAutoSkipFailureEnabled() { + HashMap skipFailureSetting = new HashMap<>(); + skipFailureSetting.put(Setting.SETTING_NAME_SKIP_FAILURE, "true"); + expect(stack.getName()).andReturn("HDPCORE").anyTimes(); + expect(stack.getVersion()).andReturn("3.0.0.0").anyTimes(); + expect(setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS)).andReturn(Collections.singleton(skipFailureSetting)); + replay(stack, setting); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, setting); + assertTrue(blueprint.shouldSkipFailure()); + + verify(stack, setting); + } + + @Test + public void testAutoSkipFailureDisabled() { + HashMap skipFailureSetting = new HashMap<>(); + skipFailureSetting.put(Setting.SETTING_NAME_SKIP_FAILURE, "false"); + expect(setting.getSettingValue(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS)).andReturn(Collections.singleton(skipFailureSetting)); + replay(stack, setting); + + Blueprint blueprint = new BlueprintImpl("test", hostGroups, stack, emptySet(), emptySet(), configuration, null, setting); + 
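// SETTING_NAME_SKIP_FAILURE was stubbed to "false" above, so the blueprint must not report auto-skip of failed tasks +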
assertFalse(blueprint.shouldSkipFailure()); + + verify(stack, setting); + } + + private org.apache.ambari.server.configuration.Configuration setupConfigurationWithGPLLicense(boolean isGPLAllowed) { + reset(serverConfig); + expect(serverConfig.getGplLicenseAccepted()).andReturn(isGPLAllowed).atLeastOnce(); + return serverConfig; + } + + //todo: ensure coverage for these existing tests + + // private void validateEntity(BlueprintEntity entity, boolean containsConfig) { +// assertEquals(BLUEPRINT_NAME, entity.getBlueprintName()); +// +// StackEntity stackEntity = entity.getStack(); +// assertEquals("test-stack-name", stackEntity.getStackName()); +// assertEquals("test-stack-version", stackEntity.getStackVersion()); +// +// Collection hostGroupEntities = entity.getHostGroups(); +// +// assertEquals(2, hostGroupEntities.size()); +// for (HostGroupEntity hostGroup : hostGroupEntities) { +// assertEquals(BLUEPRINT_NAME, hostGroup.getBlueprintName()); +// assertNotNull(hostGroup.getBlueprintEntity()); +// Collection componentEntities = hostGroup.getComponents(); +// if (hostGroup.getName().equals("group1")) { +// assertEquals("1", hostGroup.getCardinality()); +// assertEquals(2, componentEntities.size()); +// Iterator componentIterator = componentEntities.iterator(); +// String name = componentIterator.next().getName(); +// assertTrue(name.equals("component1") || name.equals("component2")); +// String name2 = componentIterator.next().getName(); +// assertFalse(name.equals(name2)); +// assertTrue(name2.equals("component1") || name2.equals("component2")); +// } else if (hostGroup.getName().equals("group2")) { +// assertEquals("2", hostGroup.getCardinality()); +// assertEquals(1, componentEntities.size()); +// HostGroupComponentEntity componentEntity = componentEntities.iterator().next(); +// assertEquals("component1", componentEntity.getName()); +// +// if (containsConfig) { +// Collection configurations = hostGroup.getConfigurations(); +// assertEquals(1, configurations.size()); +// HostGroupConfigEntity hostGroupConfigEntity = configurations.iterator().next(); +// assertEquals(BLUEPRINT_NAME, hostGroupConfigEntity.getBlueprintName()); +// assertSame(hostGroup, hostGroupConfigEntity.getHostGroupEntity()); +// assertEquals("core-site", hostGroupConfigEntity.getType()); +// Map properties = gson.>fromJson( +// hostGroupConfigEntity.getConfigData(), Map.class); +// assertEquals(1, properties.size()); +// assertEquals("anything", properties.get("my.custom.hg.property")); +// } +// } else { +// fail("Unexpected host group name"); +// } +// } +// Collection configurations = entity.getConfigurations(); +// if (containsConfig) { +// assertEquals(1, configurations.size()); +// BlueprintConfigEntity blueprintConfigEntity = configurations.iterator().next(); +// assertEquals(BLUEPRINT_NAME, blueprintConfigEntity.getBlueprintName()); +// assertSame(entity, blueprintConfigEntity.getBlueprintEntity()); +// assertEquals("core-site", blueprintConfigEntity.getType()); +// Map properties = gson.>fromJson( +// blueprintConfigEntity.getConfigData(), Map.class); +// assertEquals(2, properties.size()); +// assertEquals("480", properties.get("fs.trash.interval")); +// assertEquals("8500", properties.get("ipc.client.idlethreshold")); +// } else { +// assertEquals(0, configurations.size()); +// } +// } + + + + // @Test +// public void testCreateResource_Validate__Cardinality__ExternalComponent() throws Exception { +// +// Set> setProperties = getTestProperties(); +// setConfigurationProperties(setProperties); +// ((Set>) 
setProperties.iterator().next().get("configurations")). +// add(Collections.singletonMap("global/hive_database", "Existing MySQL Database")); +// +// Iterator iter = ((HashSet>>>) setProperties.iterator().next(). +// get(BlueprintResourceProvider.HOST_GROUP_PROPERTY_ID)). +// iterator().next().get("components").iterator(); +// iter.next(); +// iter.remove(); +// +// AmbariManagementController managementController = createMock(AmbariManagementController.class); +// Capture> stackServiceRequestCapture = EasyMock.newCapture(); +// Capture> serviceComponentRequestCapture = EasyMock.newCapture(); +// Capture stackConfigurationRequestCapture = EasyMock.newCapture(); +// Capture stackLevelConfigurationRequestCapture = EasyMock.newCapture(); +// Request request = createMock(Request.class); +// StackServiceResponse stackServiceResponse = createMock(StackServiceResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse = createNiceMock(StackServiceComponentResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse2 = createNiceMock(StackServiceComponentResponse.class); +// Set setServiceComponents = new HashSet(); +// setServiceComponents.add(stackServiceComponentResponse); +// setServiceComponents.add(stackServiceComponentResponse2); +// +// Map services = new HashMap(); +// ServiceInfo service = new ServiceInfo(); +// service.setName("test-service"); +// services.put("test-service", service); +// +// List serviceComponents = new ArrayList(); +// ComponentInfo component1 = new ComponentInfo(); +// component1.setName("component1"); +// ComponentInfo component2 = new ComponentInfo(); +// component2.setName("MYSQL_SERVER"); +// serviceComponents.add(component1); +// serviceComponents.add(component2); +// +// Capture entityCapture = EasyMock.newCapture(); +// +// // set expectations +// expect(blueprintFactory.createBlueprint(setProperties.iterator().next())).andReturn(blueprint).once(); +// expect(blueprintValidator.validateRequiredProperties()).andReturn(Collections.>>emptyMap()).once(blueprint); +// expect(blueprint.toEntity()).andReturn(entity); +// expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).atLeastOnce(); +// expect(managementController.getStackServices(capture(stackServiceRequestCapture))).andReturn( +// Collections.singleton(stackServiceResponse)); +// expect(stackServiceResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceResponse.getExcludedConfigTypes()).andReturn(Collections.emptySet()); +// +// expect(managementController.getStackComponents(capture(serviceComponentRequestCapture))).andReturn(setServiceComponents).anyTimes(); +// expect(stackServiceComponentResponse.getCardinality()).andReturn("2").anyTimes(); +// expect(stackServiceComponentResponse.getComponentName()).andReturn("component1").anyTimes(); +// expect(stackServiceComponentResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceComponentResponse2.getCardinality()).andReturn("1").anyTimes(); +// expect(stackServiceComponentResponse2.getComponentName()).andReturn("MYSQL_SERVER").anyTimes(); +// 
expect(stackServiceComponentResponse2.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse2.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse2.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// +// expect(managementController.getStackConfigurations(Collections.singleton(capture(stackConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// expect(managementController.getStackLevelConfigurations(Collections.singleton(capture(stackLevelConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "MYSQL_SERVER")). +// andReturn(Collections.emptyList()).anyTimes(); +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component1")). +// andReturn(Collections.emptyList()).anyTimes(); +// +// expect(request.getProperties()).andReturn(setProperties); +// expect(request.getRequestInfoProperties()).andReturn(Collections.emptyMap()); +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null); +// expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes(); +// expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")). +// andReturn(serviceComponents).anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component2")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getService("test-stack-name", "test-stack-version", "test-service")).andReturn(service).anyTimes(); +// dao.create(capture(entityCapture)); +// +// replay(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// // end expectations +// +// ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( +// Resource.Type.Blueprint, +// PropertyHelper.getPropertyIds(Resource.Type.Blueprint), +// PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint), +// managementController); +// +// AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); +// ((ObservableResourceProvider)provider).addObserver(observer); +// +// provider.createResources(request); +// +// ResourceProviderEvent lastEvent = observer.getLastEvent(); +// assertNotNull(lastEvent); +// assertEquals(Resource.Type.Blueprint, lastEvent.getResourceType()); +// assertEquals(ResourceProviderEvent.Type.Create, lastEvent.getType()); +// assertEquals(request, lastEvent.getRequest()); +// assertNull(lastEvent.getPredicate()); +// +// verify(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// } + +// @Test +// public void testCreateResource_Validate__Cardinality__MultipleDependencyInstances() throws AmbariException, ResourceAlreadyExistsException, +// SystemException, UnsupportedPropertyException, NoSuchParentResourceException { +// +// Set> setProperties = getTestProperties(); +// setConfigurationProperties(setProperties); +// +// AmbariManagementController managementController = createMock(AmbariManagementController.class); +// Capture> stackServiceRequestCapture = EasyMock.newCapture(); +// 
Capture> serviceComponentRequestCapture = EasyMock.newCapture(); +// Capture stackConfigurationRequestCapture = EasyMock.newCapture(); +// Capture stackLevelConfigurationRequestCapture = EasyMock.newCapture(); +// Request request = createMock(Request.class); +// StackServiceResponse stackServiceResponse = createMock(StackServiceResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse = createNiceMock(StackServiceComponentResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse2 = createNiceMock(StackServiceComponentResponse.class); +// Set setServiceComponents = new HashSet(); +// setServiceComponents.add(stackServiceComponentResponse); +// setServiceComponents.add(stackServiceComponentResponse2); +// +// DependencyInfo dependencyInfo = new DependencyInfo(); +// AutoDeployInfo autoDeployInfo = new AutoDeployInfo(); +// autoDeployInfo.setEnabled(false); +// dependencyInfo.setAutoDeploy(autoDeployInfo); +// dependencyInfo.setScope("cluster"); +// dependencyInfo.setName("test-service/component1"); +// +// Map services = new HashMap(); +// ServiceInfo service = new ServiceInfo(); +// service.setName("test-service"); +// services.put("test-service", service); +// +// List serviceComponents = new ArrayList(); +// ComponentInfo component1 = new ComponentInfo(); +// component1.setName("component1"); +// ComponentInfo component2 = new ComponentInfo(); +// component2.setName("component2"); +// serviceComponents.add(component1); +// serviceComponents.add(component2); +// +// Capture entityCapture = EasyMock.newCapture(); +// +// // set expectations +// expect(managementController.getStackServices(capture(stackServiceRequestCapture))).andReturn( +// Collections.singleton(stackServiceResponse)); +// expect(stackServiceResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceResponse.getExcludedConfigTypes()).andReturn(Collections.emptySet()); +// +// expect(managementController.getStackComponents(capture(serviceComponentRequestCapture))).andReturn(setServiceComponents).anyTimes(); +// expect(stackServiceComponentResponse.getCardinality()).andReturn("2").anyTimes(); +// expect(stackServiceComponentResponse.getComponentName()).andReturn("component1").anyTimes(); +// expect(stackServiceComponentResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceComponentResponse2.getCardinality()).andReturn("1").anyTimes(); +// expect(stackServiceComponentResponse2.getComponentName()).andReturn("component2").anyTimes(); +// expect(stackServiceComponentResponse2.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse2.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse2.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// +// expect(managementController.getStackConfigurations(Collections.singleton(capture(stackConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// expect(managementController.getStackLevelConfigurations(Collections.singleton(capture(stackLevelConfigurationRequestCapture)))). 
+// andReturn(Collections.emptySet()); +// +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component2")). +// andReturn(Collections.singletonList(dependencyInfo)).anyTimes(); +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component1")). +// andReturn(Collections.emptyList()).anyTimes(); +// +// expect(request.getProperties()).andReturn(setProperties); +// expect(request.getRequestInfoProperties()).andReturn(Collections.emptyMap()); +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null); +// expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes(); +// expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")). +// andReturn(serviceComponents).anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component2")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getService("test-stack-name", "test-stack-version", "test-service")).andReturn(service).anyTimes(); +// dao.create(capture(entityCapture)); +// +// replay(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// // end expectations +// +// ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( +// Resource.Type.Blueprint, +// PropertyHelper.getPropertyIds(Resource.Type.Blueprint), +// PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint), +// managementController); +// +// AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); +// ((ObservableResourceProvider)provider).addObserver(observer); +// +// provider.createResources(request); +// +// ResourceProviderEvent lastEvent = observer.getLastEvent(); +// assertNotNull(lastEvent); +// assertEquals(Resource.Type.Blueprint, lastEvent.getResourceType()); +// assertEquals(ResourceProviderEvent.Type.Create, lastEvent.getType()); +// assertEquals(request, lastEvent.getRequest()); +// assertNull(lastEvent.getPredicate()); +// +// verify(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// } + +// @Test +// public void testCreateResource_Validate__Cardinality__AutoCommit() throws AmbariException, ResourceAlreadyExistsException, +// SystemException, UnsupportedPropertyException, NoSuchParentResourceException { +// +// Set> setProperties = getTestProperties(); +// setConfigurationProperties(setProperties); +// +// // remove component2 from BP +// Iterator iter = ((HashSet>>>) setProperties.iterator().next(). +// get(BlueprintResourceProvider.HOST_GROUP_PROPERTY_ID)). 
+// iterator().next().get("components").iterator(); +// iter.next(); +// iter.remove(); +// +// AmbariManagementController managementController = createMock(AmbariManagementController.class); +// Capture> stackServiceRequestCapture = EasyMock.newCapture(); +// Capture> serviceComponentRequestCapture = EasyMock.newCapture(); +// Capture stackConfigurationRequestCapture = EasyMock.newCapture(); +// Capture stackLevelConfigurationRequestCapture = EasyMock.newCapture(); +// Request request = createMock(Request.class); +// StackServiceResponse stackServiceResponse = createMock(StackServiceResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse = createNiceMock(StackServiceComponentResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse2 = createNiceMock(StackServiceComponentResponse.class); +// Set setServiceComponents = new HashSet(); +// setServiceComponents.add(stackServiceComponentResponse); +// setServiceComponents.add(stackServiceComponentResponse2); +// +// DependencyInfo dependencyInfo = new DependencyInfo(); +// AutoDeployInfo autoDeployInfo = new AutoDeployInfo(); +// autoDeployInfo.setEnabled(true); +// autoDeployInfo.setCoLocate("test-service/component1"); +// dependencyInfo.setAutoDeploy(autoDeployInfo); +// dependencyInfo.setScope("cluster"); +// dependencyInfo.setName("test-service/component2"); +// +// Map services = new HashMap(); +// ServiceInfo service = new ServiceInfo(); +// service.setName("test-service"); +// services.put("test-service", service); +// +// List serviceComponents = new ArrayList(); +// ComponentInfo component1 = new ComponentInfo(); +// component1.setName("component1"); +// ComponentInfo component2 = new ComponentInfo(); +// component2.setName("component2"); +// serviceComponents.add(component1); +// serviceComponents.add(component2); +// +// Capture entityCapture = EasyMock.newCapture(); +// +// // set expectations +// expect(managementController.getStackServices(capture(stackServiceRequestCapture))).andReturn( +// Collections.singleton(stackServiceResponse)); +// expect(stackServiceResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceResponse.getExcludedConfigTypes()).andReturn(Collections.emptySet()); +// +// expect(managementController.getStackComponents(capture(serviceComponentRequestCapture))).andReturn(setServiceComponents).anyTimes(); +// expect(stackServiceComponentResponse.getCardinality()).andReturn("2").anyTimes(); +// expect(stackServiceComponentResponse.getComponentName()).andReturn("component1").anyTimes(); +// expect(stackServiceComponentResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceComponentResponse2.getCardinality()).andReturn("1").anyTimes(); +// expect(stackServiceComponentResponse2.getComponentName()).andReturn("component2").anyTimes(); +// expect(stackServiceComponentResponse2.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse2.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse2.getStackVersion()).andReturn("test-stack-version").anyTimes(); 
+// +// expect(managementController.getStackConfigurations(Collections.singleton(capture(stackConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// expect(managementController.getStackLevelConfigurations(Collections.singleton(capture(stackLevelConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component2")). +// andReturn(Collections.emptyList()).anyTimes(); +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component1")). +// andReturn(Collections.singletonList(dependencyInfo)).anyTimes(); +// +// expect(request.getProperties()).andReturn(setProperties); +// expect(request.getRequestInfoProperties()).andReturn(Collections.emptyMap()); +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null); +// expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes(); +// expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")). +// andReturn(serviceComponents).anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component2")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getService("test-stack-name", "test-stack-version", "test-service")).andReturn(service).anyTimes(); +// dao.create(capture(entityCapture)); +// +// replay(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// // end expectations +// +// ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( +// Resource.Type.Blueprint, +// PropertyHelper.getPropertyIds(Resource.Type.Blueprint), +// PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint), +// managementController); +// +// AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); +// ((ObservableResourceProvider)provider).addObserver(observer); +// +// provider.createResources(request); +// +// ResourceProviderEvent lastEvent = observer.getLastEvent(); +// assertNotNull(lastEvent); +// assertEquals(Resource.Type.Blueprint, lastEvent.getResourceType()); +// assertEquals(ResourceProviderEvent.Type.Create, lastEvent.getType()); +// assertEquals(request, lastEvent.getRequest()); +// assertNull(lastEvent.getPredicate()); +// +// verify(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// } + +// @Test +// public void testCreateResource_Validate__Cardinality__Fail() throws AmbariException, ResourceAlreadyExistsException, +// SystemException, UnsupportedPropertyException, NoSuchParentResourceException { +// +// Set> setProperties = getTestProperties(); +// setConfigurationProperties(setProperties); +// +// Iterator iter = ((HashSet>>>) setProperties.iterator().next(). +// get(BlueprintResourceProvider.HOST_GROUP_PROPERTY_ID)). 
+// iterator().next().get("components").iterator(); +// iter.next(); +// iter.remove(); +// +// AmbariManagementController managementController = createMock(AmbariManagementController.class); +// Capture<Set<StackServiceRequest>> stackServiceRequestCapture = EasyMock.newCapture(); +// Capture<Set<StackServiceComponentRequest>> serviceComponentRequestCapture = EasyMock.newCapture(); +// Capture<StackConfigurationRequest> stackConfigurationRequestCapture = EasyMock.newCapture(); +// Capture<StackLevelConfigurationRequest> stackLevelConfigurationRequestCapture = EasyMock.newCapture(); +// Request request = createMock(Request.class); +// StackServiceResponse stackServiceResponse = createMock(StackServiceResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse = createNiceMock(StackServiceComponentResponse.class); +// StackServiceComponentResponse stackServiceComponentResponse2 = createNiceMock(StackServiceComponentResponse.class); +// Set<StackServiceComponentResponse> setServiceComponents = new HashSet<StackServiceComponentResponse>(); +// setServiceComponents.add(stackServiceComponentResponse); +// setServiceComponents.add(stackServiceComponentResponse2); +// +// Map<String, ServiceInfo> services = new HashMap<String, ServiceInfo>(); +// ServiceInfo service = new ServiceInfo(); +// service.setName("test-service"); +// services.put("test-service", service); +// +// List<ComponentInfo> serviceComponents = new ArrayList<ComponentInfo>(); +// ComponentInfo component1 = new ComponentInfo(); +// component1.setName("component1"); +// ComponentInfo component2 = new ComponentInfo(); +// component2.setName("MYSQL_SERVER"); +// serviceComponents.add(component1); +// serviceComponents.add(component2); +// +// // set expectations +// expect(managementController.getStackServices(capture(stackServiceRequestCapture))).andReturn( +// Collections.singleton(stackServiceResponse)); +// expect(stackServiceResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceResponse.getExcludedConfigTypes()).andReturn(Collections.emptySet()); +// +// expect(managementController.getStackComponents(capture(serviceComponentRequestCapture))).andReturn(setServiceComponents).anyTimes(); +// expect(stackServiceComponentResponse.getCardinality()).andReturn("2").anyTimes(); +// expect(stackServiceComponentResponse.getComponentName()).andReturn("component1").anyTimes(); +// expect(stackServiceComponentResponse.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// expect(stackServiceComponentResponse2.getCardinality()).andReturn("1").anyTimes(); +// expect(stackServiceComponentResponse2.getComponentName()).andReturn("MYSQL_SERVER").anyTimes(); +// expect(stackServiceComponentResponse2.getServiceName()).andReturn("test-service").anyTimes(); +// expect(stackServiceComponentResponse2.getStackName()).andReturn("test-stack-name").anyTimes(); +// expect(stackServiceComponentResponse2.getStackVersion()).andReturn("test-stack-version").anyTimes(); +// +// expect(managementController.getStackConfigurations(Collections.singleton(capture(stackConfigurationRequestCapture)))). +// andReturn(Collections.emptySet()); +// expect(managementController.getStackLevelConfigurations(Collections.singleton(capture(stackLevelConfigurationRequestCapture)))).
+// andReturn(Collections.emptySet()); +// +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "MYSQL_SERVER")). +// andReturn(Collections.emptyList()).anyTimes(); +// expect(metaInfo.getComponentDependencies("test-stack-name", "test-stack-version", "test-service", "component1")). +// andReturn(Collections.emptyList()).anyTimes(); +// +// expect(request.getProperties()).andReturn(setProperties); +// expect(request.getRequestInfoProperties()).andReturn(Collections.emptyMap()); +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null); +// expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes(); +// expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")). +// andReturn(serviceComponents).anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component2")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getService("test-stack-name", "test-stack-version", "test-service")).andReturn(service).anyTimes(); +// +// replay(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// // end expectations +// +// ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( +// Resource.Type.Blueprint, +// PropertyHelper.getPropertyIds(Resource.Type.Blueprint), +// PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint), +// managementController); +// +// AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); +// ((ObservableResourceProvider)provider).addObserver(observer); +// +// try { +// provider.createResources(request); +// fail("Expected validation failure for MYSQL_SERVER"); +// } catch (IllegalArgumentException e) { +// // expected +// } +// +// verify(dao, metaInfo, request, managementController, stackServiceResponse, +// stackServiceComponentResponse, stackServiceComponentResponse2); +// } + +// @Test +// public void testCreateResource_Validate__AmbariServerComponent() throws AmbariException, ResourceAlreadyExistsException, +// SystemException, UnsupportedPropertyException, NoSuchParentResourceException +// { +// Request request = createMock(Request.class); +// AmbariManagementController managementController = createMock(AmbariManagementController.class); +// Capture<Set<StackServiceRequest>> stackServiceRequestCapture = EasyMock.newCapture(); +// +// Map<String, ServiceInfo> services = new HashMap<String, ServiceInfo>(); +// ServiceInfo service = new ServiceInfo(); +// service.setName("test-service"); +// services.put("test-service", service); +// +// List<ComponentInfo> serviceComponents = new ArrayList<ComponentInfo>(); +// ComponentInfo component1 = new ComponentInfo(); +// component1.setName("component1"); +// ComponentInfo component2 = new ComponentInfo(); +// component2.setName("component2"); +// serviceComponents.add(component1); +// serviceComponents.add(component2); +// +// +// Set<Map<String, Object>> setProperties = getTestProperties(); +// ((HashSet<Map<String, String>>) ((HashSet<Map<String, Object>>) setProperties.iterator().next().get( +// BlueprintResourceProvider.HOST_GROUP_PROPERTY_ID)).iterator().next().get("components")).
+// iterator().next().put("name", "AMBARI_SERVER"); +// +// Capture entityCapture = EasyMock.newCapture(); +// +// // set expectations +// expect(managementController.getStackServices(capture(stackServiceRequestCapture))).andReturn( +// Collections.emptySet()); +// expect(request.getProperties()).andReturn(setProperties); +// expect(request.getRequestInfoProperties()).andReturn(Collections.emptyMap()); +// expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null); +// expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes(); +// expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")). +// andReturn(serviceComponents).anyTimes(); +// expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")). +// andReturn("test-service").anyTimes(); +// expect(metaInfo.getService("test-stack-name", "test-stack-version", "test-service")).andReturn(service).anyTimes(); +// +// dao.create(capture(entityCapture)); +// +// replay(dao, metaInfo, request, managementController); +// // end expectations +// +// ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider( +// Resource.Type.Blueprint, +// PropertyHelper.getPropertyIds(Resource.Type.Blueprint), +// PropertyHelper.getKeyPropertyIds(Resource.Type.Blueprint), +// managementController); +// +// AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver(); +// ((ObservableResourceProvider)provider).addObserver(observer); +// +// provider.createResources(request); +// +// ResourceProviderEvent lastEvent = observer.getLastEvent(); +// assertNotNull(lastEvent); +// assertEquals(Resource.Type.Blueprint, lastEvent.getResourceType()); +// assertEquals(ResourceProviderEvent.Type.Create, lastEvent.getType()); +// assertEquals(request, lastEvent.getRequest()); +// assertNull(lastEvent.getPredicate()); +// +// verify(dao, metaInfo, request, managementController); +// } + + } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java similarity index 86% rename from ambari-server/src/test/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidatorTest.java rename to ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java index 9e6ac130962..46396877906 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/DependencyAndCardinalityValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintValidatorImplTest.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.ambari.server.topology.validators; +package org.apache.ambari.server.topology; import static java.util.stream.Collectors.toList; import static org.easymock.EasyMock.expect; @@ -39,13 +39,6 @@ import org.apache.ambari.server.state.DependencyConditionInfo; import org.apache.ambari.server.state.DependencyInfo; import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.topology.Blueprint; -import org.apache.ambari.server.topology.Cardinality; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.Component; -import org.apache.ambari.server.topology.Configuration; -import org.apache.ambari.server.topology.HostGroup; -import org.apache.ambari.server.topology.InvalidTopologyException; import org.easymock.EasyMock; import org.easymock.EasyMockRule; import org.easymock.Mock; @@ -57,15 +50,15 @@ import com.google.common.collect.ImmutableSet; -public class DependencyAndCardinalityValidatorTest { +/** + * BlueprintValidatorImpl unit tests. + */ +public class BlueprintValidatorImplTest { private final Map<String, HostGroup> hostGroups = new LinkedHashMap<>(); @Rule public EasyMockRule mocks = new EasyMockRule(this); - @Mock(type = MockType.NICE) - private ClusterTopology topology; - @Mock(type = MockType.NICE) private Blueprint blueprint; @@ -109,12 +102,10 @@ public void setup() { autoDeploy.setEnabled(true); autoDeploy.setCoLocate("service1/component2"); - expect(blueprint.getName()).andReturn("blueprint-1").anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(new StackId("HDP", "2.2"))).anyTimes(); - expect(topology.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getHostGroups()).andReturn(hostGroups).anyTimes(); - expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(topology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(group1.getComponentNames()).andReturn(group1Components).anyTimes(); expect(group1.getComponents()).
@@ -136,7 +127,7 @@ public void setup() { dependenciesConditionInfos1.add(dependencyConditionInfo1); dependenciesConditionInfos1.add(dependencyConditionInfo2); - expect(topology.getConfiguration()).andReturn(configuration).anyTimes(); + expect(blueprint.getConfiguration()).andReturn(configuration).anyTimes(); } @After @@ -157,10 +148,9 @@ public void testValidateTopology_basic() throws Exception { expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); - replay(blueprint, topology, stack, group1, group2, dependency1); - - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + replay(blueprint, stack, group1, group2, dependency1); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); } @Test(expected = InvalidTopologyException.class) @@ -174,10 +164,9 @@ public void testValidateTopology_basic_negative() throws Exception { expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.emptyList()).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); - replay(blueprint, topology, stack, group1, group2, dependency1); - - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + replay(blueprint, stack, group1, group2, dependency1); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); } @Test @@ -193,9 +182,9 @@ public void testValidateTopology_autoDeploy() throws Exception { expect(group1.addComponent(new Component("component1"))).andReturn(true).once(); - replay(blueprint, topology, stack, group1, group2, dependency1); - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + replay(blueprint, stack, group1, group2, dependency1); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); verify(group1); } @@ -230,10 +219,10 @@ public void testValidateTopology_autoDeploy_hasDependency() throws Exception { expect(group1.addComponent(new Component("component1"))).andReturn(true).once(); expect(group1.addComponent(new Component("component3"))).andReturn(true).once(); - replay(blueprint, topology, stack, group1, group2, dependency1, dependencyComponentInfo); + replay(blueprint, stack, group1, group2, dependency1, dependencyComponentInfo); - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); verify(group1); } @@ -250,6 +239,7 @@ public void testShouldDependencyBeExcludedWenRelatedServiceIsNotInBlueprint() th expect(blueprint.getHostGroupsForComponent("component-1")).andReturn(Arrays.asList(group1)).anyTimes(); + expect(blueprint.getName()).andReturn("blueprint-1").anyTimes(); Cardinality cardinality = new Cardinality("1"); @@ -273,11 +263,11 @@ public void testShouldDependencyBeExcludedWenRelatedServiceIsNotInBlueprint() th expect(dependencyComponentInfo.isClient()).andReturn(true).anyTimes(); expect(stack.getComponentInfo("component-d")).andReturn(dependencyComponentInfo).anyTimes(); - replay(blueprint, topology, stack, group1, group2, dependency1, dependencyComponentInfo); + replay(blueprint, 
stack, group1, group2, dependency1, dependencyComponentInfo); // WHEN - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); // THEN verify(group1); @@ -295,6 +285,7 @@ public void testShouldThrowErrorWhenDependentComponentIsNotInBlueprint() throws expect(blueprint.getHostGroupsForComponent("component-1")).andReturn(Arrays.asList(group1)).anyTimes(); + expect(blueprint.getName()).andReturn("blueprint-1").anyTimes(); Cardinality cardinality = new Cardinality("1"); @@ -315,11 +306,11 @@ public void testShouldThrowErrorWhenDependentComponentIsNotInBlueprint() throws expect(stack.getComponentInfo("component-d")).andReturn(dependencyComponentInfo).anyTimes(); - replay(blueprint, topology, stack, group1, group2, dependency1, dependencyComponentInfo); + replay(blueprint, stack, group1, group2, dependency1, dependencyComponentInfo); // WHEN - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); // THEN verify(group1); @@ -338,6 +329,7 @@ public void testWhenComponentIsConditionallyDependentAndOnlyOneOfTheConditionsIs expect(blueprint.getHostGroupsForComponent("component-1")).andReturn(Arrays.asList(group1)).anyTimes(); + expect(blueprint.getName()).andReturn("blueprint-1").anyTimes(); Map<String, Map<String, String>> properties = new HashMap<>(); Map<String, String> typeProps = new HashMap<>(); typeProps.put("yarn.resourcemanager.hostname", "testhost"); @@ -376,11 +368,11 @@ public void testWhenComponentIsConditionallyDependentAndOnlyOneOfTheConditionsIs expect(dependencyComponentInfo.isClient()).andReturn(false).anyTimes(); expect(stack.getComponentInfo("component-d")).andReturn(dependencyComponentInfo).anyTimes(); - replay(blueprint, topology, stack, group1, group2, dependency1, dependency2, dependencyComponentInfo,dependencyConditionInfo1,dependencyConditionInfo2); + replay(blueprint, stack, group1, group2, dependency1, dependency2, dependencyComponentInfo,dependencyConditionInfo1,dependencyConditionInfo2); // WHEN - TopologyValidator validator = new DependencyAndCardinalityValidator(); - validator.validate(topology); + BlueprintValidator validator = new BlueprintValidatorImpl(null); + validator.validateTopology(blueprint); // THEN verify(group1); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java index 7d51906a356..c4ba8d8ca6c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java @@ -31,12 +31,13 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Stream; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor; @@ -235,8 +236,8 @@ private Capture> testProcessWithKerberos(String blueprintP expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes(); -
expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getServiceForConfigType("testConfigType")).andReturn("KERBEROS").anyTimes(); @@ -245,20 +246,28 @@ private Capture> testProcessWithKerberos(String blueprintP expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.emptyMap()).anyTimes(); - Set<String> services = ImmutableSet.of("HDFS", "KERBEROS", "ZOOKEEPER"); - expect(topology.getServices()).andReturn(services).anyTimes(); + Set<String> services = new HashSet<>(); + services.add("HDFS"); + services.add("KERBEROS"); + services.add("ZOOKEPER"); + expect(blueprint.getServices()).andReturn(services).anyTimes(); expect(stack.getConfiguration(services)).andReturn(stackDefaultConfig).once(); - expect(topology.getComponents()).andAnswer(() -> Stream.of( - ResolvedComponent.builder(new Component("NAMENODE")).serviceType("HDFS").buildPartial(), - ResolvedComponent.builder(new Component("KERBEROS")).serviceType("KERBEROS_CLIENT").buildPartial(), - ResolvedComponent.builder(new Component("ZOOKEEPER_SERVER")).serviceType("ZOOKEEPER").buildPartial() - )).anyTimes(); + List<String> hdfsComponents = new ArrayList<>(); + hdfsComponents.add("NAMENODE"); + List<String> kerberosComponents = new ArrayList<>(); + kerberosComponents.add("KERBEROS_CLIENT"); + List<String> zookeeperComponents = new ArrayList<>(); + zookeeperComponents.add("ZOOKEEPER_SERVER"); + + expect(blueprint.getComponentNames("HDFS")).andReturn(hdfsComponents).anyTimes(); + expect(blueprint.getComponentNames("KERBEROS")).andReturn(kerberosComponents).anyTimes(); + expect(blueprint.getComponentNames("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes(); expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes(); expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(topology.isValidConfigType("testConfigType")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes(); expect(topology.getConfiguration()).andReturn(blueprintConfig).anyTimes(); expect(topology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes(); @@ -307,7 +316,7 @@ private Capture> testProcessWithKerberos(String blueprintP } @Test - public void testProcessClusterConfigRequestDontIncludeKerberosConfigs() throws Exception { + public void testProcessClusterConfigRequestDontIncludeKererosConfigs() throws Exception { Map<String, Map<String, String>> existingConfig = new HashMap<>(); Configuration stackConfig = new Configuration(existingConfig, @@ -320,25 +329,40 @@ public void testProcessClusterConfigRequestDontIncludeKerberosConfigs() throws E expect(controller.getClusters()).andReturn(clusters).anyTimes(); expect(clusters.getCluster("testCluster")).andReturn(cluster).anyTimes(); - expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); +
expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getAllConfigurationTypes(anyString())).andReturn(Collections.singletonList("testConfigType")).anyTimes(); expect(stack.getExcludedConfigurationTypes(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(anyString(), anyString())).andReturn(Collections.emptyMap()).anyTimes(); - expect(topology.getServices()).andReturn(ImmutableSet.of("HDFS", "KERBEROS", "ZOOKEEPER")).anyTimes(); + Set<String> services = new HashSet<>(); + services.add("HDFS"); + services.add("KERBEROS"); + services.add("ZOOKEPER"); + expect(blueprint.getServices()).andReturn(services).anyTimes(); + + List<String> hdfsComponents = new ArrayList<>(); + hdfsComponents.add("NAMENODE"); + List<String> kerberosComponents = new ArrayList<>(); + kerberosComponents.add("KERBEROS_CLIENT"); + List<String> zookeeperComponents = new ArrayList<>(); + zookeeperComponents.add("ZOOKEEPER_SERVER"); + + expect(blueprint.getComponentNames("HDFS")).andReturn(hdfsComponents).anyTimes(); + expect(blueprint.getComponentNames("KERBEROS")).andReturn(kerberosComponents).anyTimes(); + expect(blueprint.getComponentNames("ZOOKEPER")).andReturn(zookeeperComponents).anyTimes(); + expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes(); - expect(topology.getComponents()).andAnswer(Stream::empty).anyTimes(); expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes(); expect(topology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes(); - expect(topology.getClusterId()).andReturn(1L).anyTimes(); + expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes(); expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes(); - expect(ambariContext.getClusterName(1L)).andReturn("testCluster").anyTimes(); + expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes(); expect(ambariContext.createConfigurationRequests(EasyMock.anyObject())).andReturn(Collections .emptyList()).anyTimes(); @@ -372,14 +396,14 @@ public void testProcessClusterConfigRequestRemoveUnusedConfigTypes() throws Exce expect(topology.getConfiguration()).andReturn(configuration).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap); - expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); - expect(topology.isValidConfigType("hdfs-site")).andReturn(true).anyTimes(); - expect(topology.isValidConfigType("admin-properties")).andReturn(true).anyTimes(); - expect(topology.isValidConfigType("yarn-site")).andReturn(false).anyTimes(); - expect(topology.isValidConfigType("cluster-env")).andReturn(true).anyTimes(); - expect(topology.isValidConfigType("global")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("hdfs-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("admin-properties")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("yarn-site")).andReturn(false).anyTimes(); +
expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes(); expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes(); @@ -422,12 +446,12 @@ public void testProcessClusterConfigRequestWithOnlyHostGroupConfigRemoveUnusedCo expect(topology.getConfiguration()).andReturn(configuration).anyTimes(); expect(topology.getBlueprint()).andReturn(blueprint).anyTimes(); expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap); - expect(topology.getStack()).andReturn(stack).anyTimes(); - expect(topology.getServices()).andReturn(services).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(services).anyTimes(); - expect(topology.isValidConfigType("hdfs-site")).andReturn(true).anyTimes(); - expect(topology.isValidConfigType("cluster-env")).andReturn(true).anyTimes(); - expect(topology.isValidConfigType("global")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("hdfs-site")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("cluster-env")).andReturn(true).anyTimes(); + expect(blueprint.isValidConfigType("global")).andReturn(true).anyTimes(); expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java index dab906242da..55db7684f18 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java @@ -17,7 +17,6 @@ */ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toSet; import static org.easymock.EasyMock.anyBoolean; import static org.easymock.EasyMock.anyLong; import static org.easymock.EasyMock.anyObject; @@ -27,6 +26,7 @@ import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.expectLastCall; import static org.easymock.EasyMock.isA; +import static org.easymock.EasyMock.isNull; import static org.easymock.EasyMock.newCapture; import static org.junit.Assert.assertEquals; @@ -64,7 +64,6 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.tasks.ConfigureClusterTask; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.TopologyValidator; import org.apache.ambari.server.topology.validators.TopologyValidatorService; import org.easymock.Capture; import org.easymock.EasyMockRule; @@ -81,7 +80,6 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @RunWith(PowerMockRunner.class) @@ -93,7 +91,6 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport { private static final String STACK_NAME = "test-stack"; private static final String STACK_VERSION = "test-stack-version"; private static final StackId STACK_ID = new StackId(STACK_NAME, STACK_VERSION); - private static final Setting SETTING = new Setting(Collections.emptyMap()); @Rule public EasyMockRule mocks = new EasyMockRule(this); @@ -101,9 +98,6 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport { @TestSubject 
private TopologyManager topologyManager = new TopologyManager(); - @Mock - private org.apache.ambari.server.configuration.Configuration configuration; - @Mock(type = MockType.NICE) private Blueprint blueprint; @@ -172,8 +166,6 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport { @Mock private TopologyValidatorService topologyValidatorServiceMock; - @Mock - private ComponentResolver componentResolver; private final Configuration stackConfig = new Configuration(new HashMap<>(), new HashMap<>()); @@ -195,10 +187,8 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport { private HostGroupInfo group2Info = new HostGroupInfo("group2"); private Map<String, HostGroupInfo> groupInfoMap = new HashMap<>(); - private final Collection<Component> group1Components = Arrays.asList(new Component("component1"), new Component("component2"), new Component("component3")); - private final Collection<String> group1ComponentNames = group1Components.stream().map(Component::getName).collect(toSet()); - private final Collection<Component> group2Components = Arrays.asList(new Component("component3"), new Component("component4")); - private final Collection<String> group2ComponentNames = group2Components.stream().map(Component::getName).collect(toSet()); + private Collection<String> group1Components = Arrays.asList("component1", "component2", "component3"); + private Collection<String> group2Components = Arrays.asList("component3", "component4"); private Map<String, Collection<String>> group1ServiceComponents = new HashMap<>(); private Map<String, Collection<String>> group2ServiceComponents = new HashMap<>(); @@ -207,6 +197,7 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport { private List<TopologyValidator> topologyValidators = new ArrayList<>(); + private Capture<ClusterTopology> clusterTopologyCapture; private Capture<Map<String, Object>> configRequestPropertiesCapture; private Capture<Map<String, Object>> configRequestPropertiesCapture2; private Capture<Map<String, Object>> configRequestPropertiesCapture3; @@ -215,10 +206,7 @@ @Before public void setup() throws Exception { - expect(configuration.getParallelTopologyTaskCreationThreadPoolSize()).andReturn(1).anyTimes(); - expect(configuration.getParallelStageExecution()).andReturn(false).anyTimes(); - expect(configuration.getGplLicenseAccepted()).andReturn(true).anyTimes(); - + clusterTopologyCapture = newCapture(); configRequestPropertiesCapture = newCapture(); configRequestPropertiesCapture2 = newCapture(); configRequestPropertiesCapture3 = newCapture(); @@ -256,18 +244,22 @@ public void setup() throws Exception { expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes(); expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes(); - expect(blueprint.getSetting()).andReturn(SETTING).anyTimes(); + expect(blueprint.getComponentNames("service1")).andReturn(Arrays.asList("component1", "component3")).anyTimes(); + expect(blueprint.getComponentNames("service2")).andReturn(Arrays.asList("component2", "component4")).anyTimes(); expect(blueprint.getConfiguration()).andReturn(bpConfiguration).anyTimes(); expect(blueprint.getHostGroups()).andReturn(groupMap).anyTimes(); expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component3")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component4")).andReturn(Collections.singleton(group2)).anyTimes(); +
expect(blueprint.getHostGroupsForService("service1")).andReturn(Arrays.asList(group1, group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes(); - expect(ambariContext.composeStacks(anyObject())).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(blueprint.getSecurity()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(blueprint.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); + expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes(); + expect(blueprint.getRepositorySettings()).andReturn(new ArrayList<>()).anyTimes(); // don't expect toEntity() expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes(); @@ -297,13 +289,8 @@ public void setup() throws Exception { expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes(); expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes(); - expect(stack.getDependenciesForComponent(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes(); - expect(stack.getServiceForComponent("component1")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component2")).andReturn("service2").anyTimes(); - expect(stack.getServiceForComponent("component3")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component4")).andReturn("service2").anyTimes(); expect(request.getBlueprint()).andReturn(blueprint).anyTimes(); expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes(); @@ -315,33 +302,30 @@ public void setup() throws Exception { expect(request.getProvisionAction()).andReturn(ProvisionAction.START_ONLY).anyTimes(); expect(request.getSecurityConfiguration()).andReturn(null).anyTimes(); expect(request.getRepositoryVersion()).andReturn("1").anyTimes(); - expect(request.getStackIds()).andReturn(ImmutableSet.of()).anyTimes(); - expect(request.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - - expect(componentResolver.resolveComponents(anyObject())).andReturn(ImmutableMap.of( - "group1", ImmutableSet.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component2")).serviceType("service2").buildPartial() - ), - "group2", ImmutableSet.of( - ResolvedComponent.builder(new Component("component3")).serviceType("service2").buildPartial(), - ResolvedComponent.builder(new Component("component4")).serviceType("service2").buildPartial() - ) - )).anyTimes(); + expect(group1.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group1.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group1.getComponents()).andReturn(group1Components).anyTimes(); - expect(group1.getComponentNames()).andReturn(group1ComponentNames).anyTimes(); + expect(group1.containsMasterComponent()).andReturn(true).anyTimes(); + 
expect(group1.getComponentNames()).andReturn(group1Components).anyTimes(); expect(group1.getComponentNames(anyObject(ProvisionAction.class))).andReturn(Collections.emptyList()).anyTimes(); + expect(group1.getComponentNames("service1")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); + expect(group1.getComponentNames("service2")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); expect(group1.getConfiguration()).andReturn(topoGroup1Config).anyTimes(); expect(group1.getName()).andReturn("group1").anyTimes(); + expect(group1.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group1.getStack()).andReturn(stack).anyTimes(); + expect(group2.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group2.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group2.getComponents()).andReturn(group2Components).anyTimes(); - expect(group2.getComponentNames()).andReturn(group2ComponentNames).anyTimes(); + expect(group2.containsMasterComponent()).andReturn(false).anyTimes(); + expect(group2.getComponentNames()).andReturn(group2Components).anyTimes(); expect(group2.getComponentNames(anyObject(ProvisionAction.class))).andReturn(Collections.emptyList()).anyTimes(); + expect(group2.getComponentNames("service1")).andReturn(group2ServiceComponents.get("service1")).anyTimes(); + expect(group2.getComponentNames("service2")).andReturn(group2ServiceComponents.get("service2")).anyTimes(); expect(group2.getConfiguration()).andReturn(topoGroup2Config).anyTimes(); expect(group2.getName()).andReturn("group2").anyTimes(); + expect(group2.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group2.getStack()).andReturn(stack).anyTimes(); // Create partial mock to allow actual logical request creation logicalRequestFactory = createMockBuilder(LogicalRequestFactory.class).addMockedMethod( @@ -361,7 +345,7 @@ public void setup() throws Exception { expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes(); //todo: don't ignore param - ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), eq(SecurityType.NONE), eq("1"), anyLong()); + ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), eq("1"), anyLong()); expectLastCall().once(); expect(ambariContext.getNextRequestId()).andReturn(1L).once(); expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java index 13d5b175a0c..bac0e5b9f56 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java @@ -18,7 +18,6 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toSet; import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_AND_START; import static org.easymock.EasyMock.anyBoolean; import static org.easymock.EasyMock.anyLong; @@ -29,6 +28,7 @@ import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.expectLastCall; import static org.easymock.EasyMock.isA; +import static org.easymock.EasyMock.isNull; import static 
org.easymock.EasyMock.newCapture; import java.lang.reflect.Field; @@ -65,7 +65,6 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.tasks.ConfigureClusterTask; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.TopologyValidator; import org.apache.ambari.server.topology.validators.TopologyValidatorService; import org.easymock.Capture; import org.easymock.EasyMockRule; @@ -82,7 +81,6 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @RunWith(PowerMockRunner.class) @@ -94,7 +92,6 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp private static final String STACK_NAME = "test-stack"; private static final String STACK_VERSION = "test-stack-version"; private static final StackId STACK_ID = new StackId(STACK_NAME, STACK_VERSION); - private static final Setting SETTING = new Setting(Collections.emptyMap()); @Rule public EasyMockRule mocks = new EasyMockRule(this); @@ -102,9 +99,6 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp @TestSubject private TopologyManager topologyManager = new TopologyManager(); - @Mock - private org.apache.ambari.server.configuration.Configuration configuration; - @Mock(type = MockType.NICE) private Blueprint blueprint; @@ -169,9 +163,6 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp @Mock private TopologyValidatorService topologyValidatorServiceMock; - @Mock - private ComponentResolver componentResolver; - private final Configuration stackConfig = new Configuration(new HashMap<>(), new HashMap<>()); private final Configuration bpConfiguration = new Configuration(new HashMap<>(), @@ -192,10 +183,8 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp private HostGroupInfo group2Info = new HostGroupInfo("group2"); private Map<String, HostGroupInfo> groupInfoMap = new HashMap<>(); - private final Collection<Component> group1Components = Arrays.asList(new Component("component1"), new Component("component2"), new Component("component3")); - private final Collection<String> group1ComponentNames = group1Components.stream().map(Component::getName).collect(toSet()); - private final Collection<Component> group2Components = Arrays.asList(new Component("component3"), new Component("component4")); - private final Collection<String> group2ComponentNames = group2Components.stream().map(Component::getName).collect(toSet()); + private Collection<String> group1Components = Arrays.asList("component1", "component2", "component3"); + private Collection<String> group2Components = Arrays.asList("component3", "component4"); private Map<String, Collection<String>> group1ServiceComponents = new HashMap<>(); private Map<String, Collection<String>> group2ServiceComponents = new HashMap<>(); @@ -204,6 +193,7 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp private List<TopologyValidator> topologyValidators = new ArrayList<>(); + private Capture<ClusterTopology> clusterTopologyCapture; private Capture<Map<String, Object>> configRequestPropertiesCapture; private Capture<Map<String, Object>> configRequestPropertiesCapture2; private Capture<Map<String, Object>> configRequestPropertiesCapture3; @@ -213,10 +203,7 @@ @Before public void setup() throws Exception { - expect(configuration.getParallelTopologyTaskCreationThreadPoolSize()).andReturn(1).anyTimes(); -
expect(configuration.getParallelStageExecution()).andReturn(false).anyTimes(); - expect(configuration.getGplLicenseAccepted()).andReturn(true).anyTimes(); - + clusterTopologyCapture = newCapture(); configRequestPropertiesCapture = newCapture(); configRequestPropertiesCapture2 = newCapture(); configRequestPropertiesCapture3 = newCapture(); @@ -254,18 +241,22 @@ public void setup() throws Exception { expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes(); expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes(); - expect(blueprint.getSetting()).andReturn(SETTING).anyTimes(); + expect(blueprint.getComponents("service1")).andReturn(Arrays.asList(new Component("component1"), new Component("component3"))).anyTimes(); + expect(blueprint.getComponents("service2")).andReturn(Arrays.asList(new Component("component2"), new Component("component4"))).anyTimes(); expect(blueprint.getConfiguration()).andReturn(bpConfiguration).anyTimes(); expect(blueprint.getHostGroups()).andReturn(groupMap).anyTimes(); expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component3")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component4")).andReturn(Collections.singleton(group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service1")).andReturn(Arrays.asList(group1, group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes(); - expect(ambariContext.composeStacks(anyObject())).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(blueprint.getSecurity()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(blueprint.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); + expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes(); + expect(blueprint.getRepositorySettings()).andReturn(new ArrayList<>()).anyTimes(); // don't expect toEntity() expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes(); @@ -295,13 +286,8 @@ public void setup() throws Exception { expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes(); expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes(); - expect(stack.getDependenciesForComponent(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes(); - expect(stack.getServiceForComponent("component1")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component2")).andReturn("service2").anyTimes(); - expect(stack.getServiceForComponent("component3")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component4")).andReturn("service2").anyTimes(); 
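The stubs dropped at the end of this hunk (stack.getServiceForComponent(...)) and the restored blueprint.getComponentNames(...) expectations encode the same fixture layout: component1 and component3 belong to service1, component2 and component4 to service2. A tiny illustrative resolver for that mapping, just to make the fixture explicit (not Ambari's implementation; assumes Java 9+ for Map.of):

    import java.util.Map;

    public class FixtureMapping {
      // the component-to-service layout the test expectations encode
      private static final Map<String, String> SERVICE_BY_COMPONENT = Map.of(
          "component1", "service1",
          "component2", "service2",
          "component3", "service1",
          "component4", "service2");

      static String serviceForComponent(String component) {
        return SERVICE_BY_COMPONENT.get(component);
      }

      public static void main(String[] args) {
        System.out.println(serviceForComponent("component3")); // prints: service1
      }
    }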
expect(request.getBlueprint()).andReturn(blueprint).anyTimes(); expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes(); @@ -313,39 +299,36 @@ public void setup() throws Exception { expect(request.getProvisionAction()).andReturn(INSTALL_AND_START).anyTimes(); expect(request.getSecurityConfiguration()).andReturn(null).anyTimes(); expect(request.getRepositoryVersion()).andReturn("1").anyTimes(); - expect(request.getStackIds()).andReturn(ImmutableSet.of()).anyTimes(); - expect(request.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - - expect(componentResolver.resolveComponents(anyObject())).andReturn(ImmutableMap.of( - "group1", ImmutableSet.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component2")).serviceType("service2").buildPartial() - ), - "group2", ImmutableSet.of( - ResolvedComponent.builder(new Component("component3")).serviceType("service2").buildPartial(), - ResolvedComponent.builder(new Component("component4")).serviceType("service2").buildPartial() - ) - )).anyTimes(); + expect(group1.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group1.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group1.getComponents()).andReturn(group1Components).anyTimes(); - expect(group1.getComponentNames()).andReturn(group1ComponentNames).anyTimes(); + expect(group1.containsMasterComponent()).andReturn(true).anyTimes(); + expect(group1.getComponentNames()).andReturn(group1Components).anyTimes(); expect(group1.getComponentNames(ProvisionAction.INSTALL_ONLY)).andReturn(Arrays.asList("component1")) .anyTimes(); expect(group1.getComponentNames(ProvisionAction.START_ONLY)).andReturn(Collections.emptyList()) .anyTimes(); + expect(group1.getComponents("service1")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); + expect(group1.getComponents("service2")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); expect(group1.getConfiguration()).andReturn(topoGroup1Config).anyTimes(); expect(group1.getName()).andReturn("group1").anyTimes(); + expect(group1.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group1.getStack()).andReturn(stack).anyTimes(); + expect(group2.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group2.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group2.getComponents()).andReturn(group2Components).anyTimes(); - expect(group2.getComponentNames()).andReturn(group2ComponentNames).anyTimes(); + expect(group2.containsMasterComponent()).andReturn(false).anyTimes(); + expect(group2.getComponentNames()).andReturn(group2Components).anyTimes(); expect(group2.getComponentNames(ProvisionAction.INSTALL_ONLY)).andReturn(Collections.emptyList()).anyTimes(); expect(group2.getComponentNames(ProvisionAction.START_ONLY)).andReturn(Collections.emptyList()) .anyTimes(); + expect(group2.getComponents("service1")).andReturn(group2ServiceComponents.get("service1")).anyTimes(); + expect(group2.getComponents("service2")).andReturn(group2ServiceComponents.get("service2")).anyTimes(); expect(group2.getConfiguration()).andReturn(topoGroup2Config).anyTimes(); expect(group2.getName()).andReturn("group2").anyTimes(); + expect(group2.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group2.getStack()).andReturn(stack).anyTimes(); // Create partial mock to allow actual logical request creation logicalRequestFactory = 
createMockBuilder(LogicalRequestFactory.class).addMockedMethod( @@ -365,7 +348,7 @@ public void setup() throws Exception { expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes(); //todo: don't ignore param - ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), eq(SecurityType.NONE), eq("1"), anyLong()); + ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) eq("1"), anyLong()); expectLastCall().once(); expect(ambariContext.getNextRequestId()).andReturn(1L).once(); expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java index d0841abdc17..3e11f5e8c64 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java @@ -18,7 +18,6 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toSet; import static org.apache.ambari.server.controller.internal.ProvisionAction.INSTALL_ONLY; import static org.easymock.EasyMock.anyBoolean; import static org.easymock.EasyMock.anyLong; @@ -29,6 +28,7 @@ import static org.easymock.EasyMock.expect; import static org.easymock.EasyMock.expectLastCall; import static org.easymock.EasyMock.isA; +import static org.easymock.EasyMock.isNull; import static org.easymock.EasyMock.newCapture; import java.lang.reflect.Field; @@ -65,7 +65,6 @@ import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.topology.tasks.ConfigureClusterTask; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.TopologyValidator; import org.apache.ambari.server.topology.validators.TopologyValidatorService; import org.easymock.Capture; import org.easymock.EasyMockRule; @@ -82,7 +81,6 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @RunWith(PowerMockRunner.class) @@ -95,7 +93,6 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport { private static final String STACK_NAME = "test-stack"; private static final String STACK_VERSION = "test-stack-version"; private static final StackId STACK_ID = new StackId(STACK_NAME, STACK_VERSION); - private static final Setting SETTING = new Setting(Collections.emptyMap()); @Rule public EasyMockRule mocks = new EasyMockRule(this); @@ -103,9 +100,6 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport { @TestSubject private TopologyManager topologyManager = new TopologyManager(); - @Mock - private org.apache.ambari.server.configuration.Configuration configuration; - @Mock(type = MockType.NICE) private Blueprint blueprint; @@ -171,9 +165,6 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport { @Mock private TopologyValidatorService topologyValidatorServiceMock; - @Mock - private ComponentResolver componentResolver; - private final Configuration stackConfig = new Configuration(new HashMap<>(), new HashMap<>()); private final Configuration bpConfiguration = new Configuration(new HashMap<>(), @@ -194,10 +185,8 @@ 
public class ClusterInstallWithoutStartTest extends EasyMockSupport { private HostGroupInfo group2Info = new HostGroupInfo("group2"); private Map groupInfoMap = new HashMap<>(); - private final Collection group1Components = Arrays.asList(new Component("component1"), new Component("component2"), new Component("component3")); - private final Collection group1ComponentNames = group1Components.stream().map(Component::getName).collect(toSet()); - private final Collection group2Components = Arrays.asList(new Component("component3"), new Component("component4")); - private final Collection group2ComponentNames = group2Components.stream().map(Component::getName).collect(toSet()); + private Collection group1Components = Arrays.asList("component1", "component2", "component3"); + private Collection group2Components = Arrays.asList("component3", "component4"); private Map> group1ServiceComponents = new HashMap<>(); private Map> group2ServiceComponents = new HashMap<>(); @@ -215,10 +204,6 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport { @Before public void setup() throws Exception { - expect(configuration.getParallelTopologyTaskCreationThreadPoolSize()).andReturn(1).anyTimes(); - expect(configuration.getParallelStageExecution()).andReturn(false).anyTimes(); - expect(configuration.getGplLicenseAccepted()).andReturn(true).anyTimes(); - clusterTopologyCapture = newCapture(); configRequestPropertiesCapture = newCapture(); configRequestPropertiesCapture2 = newCapture(); @@ -257,26 +242,26 @@ public void setup() throws Exception { expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes(); expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes(); - expect(blueprint.getSetting()).andReturn(SETTING).anyTimes(); + expect(blueprint.getComponentNames("service1")).andReturn(Arrays.asList("component1", "component3")).anyTimes(); + expect(blueprint.getComponentNames("service2")).andReturn(Arrays.asList("component2", "component4")).anyTimes(); expect(blueprint.getConfiguration()).andReturn(bpConfiguration).anyTimes(); expect(blueprint.getHostGroups()).andReturn(groupMap).anyTimes(); expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component3")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component4")).andReturn(Collections.singleton(group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service1")).andReturn(Arrays.asList(group1, group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes(); - expect(ambariContext.composeStacks(anyObject())).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(blueprint.getSecurity()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(blueprint.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); + expect(blueprint.isValidConfigType(anyString())).andReturn(true).anyTimes(); + expect(blueprint.getRepositorySettings()).andReturn(new ArrayList<>()).anyTimes(); // don't expect toEntity() - List configTypes1 = 
Arrays.asList("service1-site", "service1-env"); - List configTypes2 = Arrays.asList("service2-site", "service2-env"); - expect(stack.getConfigurationTypes("service1")).andReturn(configTypes1).anyTimes(); - expect(stack.getConfigurationTypes("service2")).andReturn(configTypes2).anyTimes(); - expect(stack.getAllConfigurationTypes("service1")).andReturn(configTypes1).anyTimes(); - expect(stack.getAllConfigurationTypes("service2")).andReturn(configTypes2).anyTimes(); + expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes(); + expect(stack.getAllConfigurationTypes("service2")).andReturn(Arrays.asList("service2-site", "service2-env")).anyTimes(); expect(stack.getAutoDeployInfo("component1")).andReturn(null).anyTimes(); expect(stack.getAutoDeployInfo("component2")).andReturn(null).anyTimes(); expect(stack.getAutoDeployInfo("component3")).andReturn(null).anyTimes(); @@ -302,13 +287,8 @@ public void setup() throws Exception { expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes(); expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes(); - expect(stack.getDependenciesForComponent(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes(); - expect(stack.getServiceForComponent("component1")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component2")).andReturn("service2").anyTimes(); - expect(stack.getServiceForComponent("component3")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component4")).andReturn("service2").anyTimes(); expect(request.getBlueprint()).andReturn(blueprint).anyTimes(); expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes(); @@ -320,34 +300,31 @@ public void setup() throws Exception { expect(request.getProvisionAction()).andReturn(INSTALL_ONLY).anyTimes(); expect(request.getSecurityConfiguration()).andReturn(null).anyTimes(); expect(request.getRepositoryVersion()).andReturn("1").anyTimes(); - expect(request.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(request.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - - expect(componentResolver.resolveComponents(anyObject())).andReturn(ImmutableMap.of( - "group1", ImmutableSet.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component2")).serviceType("service2").buildPartial() - ), - "group2", ImmutableSet.of( - ResolvedComponent.builder(new Component("component3")).serviceType("service2").buildPartial(), - ResolvedComponent.builder(new Component("component4")).serviceType("service2").buildPartial() - ) - )).anyTimes(); + expect(group1.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group1.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group1.getComponents()).andReturn(group1Components).anyTimes(); - expect(group1.getComponentNames()).andReturn(group1ComponentNames).anyTimes(); + expect(group1.containsMasterComponent()).andReturn(true).anyTimes(); + expect(group1.getComponentNames()).andReturn(group1Components).anyTimes(); 
expect(group1.getComponentNames(anyObject(ProvisionAction.class))).andReturn(Collections.emptyList()).anyTimes(); + expect(group1.getComponentNames("service1")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); + expect(group1.getComponentNames("service2")).andReturn(group1ServiceComponents.get("service2")).anyTimes(); expect(group1.getConfiguration()).andReturn(topoGroup1Config).anyTimes(); expect(group1.getName()).andReturn("group1").anyTimes(); + expect(group1.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group1.getStack()).andReturn(stack).anyTimes(); + expect(group2.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group2.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(group2.getComponents()).andReturn(group2Components).anyTimes(); - expect(group2.getComponentNames()).andReturn(group2ComponentNames).anyTimes(); + expect(group2.containsMasterComponent()).andReturn(false).anyTimes(); + expect(group2.getComponentNames()).andReturn(group2Components).anyTimes(); expect(group2.getComponentNames(anyObject(ProvisionAction.class))).andReturn(Collections.emptyList()).anyTimes(); + expect(group2.getComponentNames("service1")).andReturn(group2ServiceComponents.get("service1")).anyTimes(); + expect(group2.getComponentNames("service2")).andReturn(group2ServiceComponents.get("service2")).anyTimes(); expect(group2.getConfiguration()).andReturn(topoGroup2Config).anyTimes(); expect(group2.getName()).andReturn("group2").anyTimes(); + expect(group2.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group2.getStack()).andReturn(stack).anyTimes(); // Create partial mock to allow actual logical request creation logicalRequestFactory = createMockBuilder(LogicalRequestFactory.class).addMockedMethod( @@ -367,7 +344,7 @@ public void setup() throws Exception { expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes(); //todo: don't ignore param - ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), eq(SecurityType.NONE), eq("1"), anyLong()); + ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), eq("1"), anyLong()); expectLastCall().once(); expect(ambariContext.getNextRequestId()).andReturn(1L).once(); expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java index 5e81730e28d..e51ce5f63e1 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java @@ -33,7 +33,6 @@ import org.junit.After; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; /** @@ -50,7 +49,6 @@ public class ClusterTopologyImplTest { private static final HostGroup group2 = createNiceMock(HostGroup.class); private static final HostGroup group3 = createNiceMock(HostGroup.class); private static final HostGroup group4 = createNiceMock(HostGroup.class); - private final AmbariContext ambariContext = createNiceMock(AmbariContext.class); private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>(); private final Map<String, HostGroup> hostGroupMap = new HashMap<>(); @@ -165,7 +163,7 @@ public void testCreate_duplicateHosts() throws Exception { replayAll(); // should
throw exception due to duplicate host - new ClusterTopologyImpl(ambariContext, request); + new ClusterTopologyImpl(null, request); } @Test @@ -174,10 +172,9 @@ public void test_GetHostAssigmentForComponents() throws Exception { replayAll(); - new ClusterTopologyImpl(ambariContext, request).getHostAssignmentsForComponent("component1"); + new ClusterTopologyImpl(null, request).getHostAssignmentsForComponent("component1"); } - @Ignore @Test(expected = InvalidTopologyException.class) public void testCreate_NNHAInvaid() throws Exception { bpconfiguration.setProperty("hdfs-site", "dfs.nameservices", "val"); @@ -185,11 +182,10 @@ public void testCreate_NNHAInvaid() throws Exception { hostGroupInfoMap.get("group4").removeHost("host5"); TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION); replayAll(); - new ClusterTopologyImpl(ambariContext, request); + new ClusterTopologyImpl(null, request); hostGroupInfoMap.get("group4").addHost("host5"); } - @Ignore @Test(expected = IllegalArgumentException.class) public void testCreate_NNHAHostNameNotCorrectForStandby() throws Exception { expect(group4.getName()).andReturn("group4"); @@ -198,10 +194,9 @@ public void testCreate_NNHAHostNameNotCorrectForStandby() throws Exception { bpconfiguration.setProperty("hadoop-env", "dfs_ha_initial_namenode_standby", "val"); TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION); replayAll(); - new ClusterTopologyImpl(ambariContext, request); + new ClusterTopologyImpl(null, request); } - @Ignore @Test(expected = IllegalArgumentException.class) public void testCreate_NNHAHostNameNotCorrectForActive() throws Exception { expect(group4.getName()).andReturn("group4"); @@ -210,10 +205,9 @@ public void testCreate_NNHAHostNameNotCorrectForActive() throws Exception { bpconfiguration.setProperty("hadoop-env", "dfs_ha_initial_namenode_standby", "host5"); TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION); replayAll(); - new ClusterTopologyImpl(ambariContext, request); + new ClusterTopologyImpl(null, request); } - @Ignore @Test(expected = IllegalArgumentException.class) public void testCreate_NNHAHostNameNotCorrectForStandbyWithActiveAsVariable() throws Exception { expect(group4.getName()).andReturn("group4"); @@ -222,7 +216,7 @@ public void testCreate_NNHAHostNameNotCorrectForStandbyWithActiveAsVariable() th bpconfiguration.setProperty("hadoop-env", "dfs_ha_initial_namenode_standby", "host6"); TestTopologyRequest request = new TestTopologyRequest(TopologyRequest.Type.PROVISION); replayAll(); - new ClusterTopologyImpl(ambariContext, request); + new ClusterTopologyImpl(null, request); } private class TestTopologyRequest implements TopologyRequest { diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java index b0c3958d322..a5265f67883 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java @@ -97,9 +97,6 @@ public class LogicalRequestTest extends EasyMockSupport { @Mock private HostGroup hostGroup2; - @Mock - private Setting setting; - @Before public void setup() throws Exception { @@ -116,10 +113,8 @@ public void setup() throws Exception { expect(clusterTopology.getClusterId()).andReturn(clusterId).anyTimes(); 
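(Aside: the testCreate_NNHA* tests above, re-enabled by dropping @Ignore, pin down a NameNode HA host-name check. The sketch below is a reconstruction inferred from those test cases, not Ambari's verbatim validation code; the "%" placeholder check mirrors the ...WithActiveAsVariable case, where a value such as %HOSTGROUP::group4% is resolved later instead of being matched against concrete host names.)

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class NnHaHostCheckSketch {
  // Throws if a configured active/standby NameNode host is not in the topology.
  static void requireKnownHost(String role, String hostName, Set<String> topologyHosts) {
    if (hostName != null && !hostName.startsWith("%") && !topologyHosts.contains(hostName)) {
      throw new IllegalArgumentException(role + " NameNode host is not in the topology: " + hostName);
    }
  }

  public static void main(String[] args) {
    Set<String> hosts = new HashSet<>(Arrays.asList("host1", "host5"));
    requireKnownHost("standby", "host5", hosts);  // ok: host5 is a topology host
    try {
      requireKnownHost("standby", "host6", hosts);  // fails, like the ...NotCorrectForStandby tests
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}
```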
expect(clusterTopology.getProvisionAction()).andReturn(ProvisionAction.INSTALL_ONLY).anyTimes(); expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(clusterTopology.getBlueprintName()).andReturn("blueprintDef").anyTimes(); - expect(clusterTopology.getSetting()).andReturn(setting).anyTimes(); expect(blueprint.getName()).andReturn("blueprintDef").anyTimes(); - expect(setting.shouldSkipFailure()).andReturn(true).anyTimes(); + expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes(); PowerMock.reset(AmbariServer.class); @@ -127,6 +122,6 @@ expect(AmbariServer.getController()).andReturn(controller).anyTimes(); PowerMock.replay(AmbariServer.class); } @Test @@ -200,7 +198,7 @@ public void testPersistedRequestsWithReservedHosts() throws Exception { expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).atLeastOnce(); expect(blueprint.getHostGroup(eq("host_group_1"))).andReturn(hostGroup1).atLeastOnce(); - expect(clusterTopology.containsMasterComponent("host_group_1")).andReturn(false).atLeastOnce(); + expect(hostGroup1.containsMasterComponent()).andReturn(false).atLeastOnce(); replayAll(); @@ -303,7 +301,7 @@ public void testPersistedRequestsWithHostPredicate() throws Exception { expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).atLeastOnce(); expect(blueprint.getHostGroup(eq("host_group_2"))).andReturn(hostGroup2).atLeastOnce(); - expect(clusterTopology.containsMasterComponent("host_group_2")).andReturn(false).atLeastOnce(); + expect(hostGroup2.containsMasterComponent()).andReturn(false).atLeastOnce(); replayAll(); @@ -414,7 +412,7 @@ public void testRemoveHostRequestByHostName() throws Exception { expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).atLeastOnce(); expect(logicalRequestEntity.getTopologyHostRequestEntities()).andReturn(reservedHostRequestEntities).atLeastOnce(); expect(blueprint.getHostGroup(eq("host_group_1"))).andReturn(hostGroup1).atLeastOnce(); - expect(clusterTopology.containsMasterComponent("host_group_1")).andReturn(false).atLeastOnce(); + expect(hostGroup1.containsMasterComponent()).andReturn(false).atLeastOnce(); replayAll(); @@ -512,7 +510,7 @@ public void testRemovePendingHostRequests() throws Exception { expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).atLeastOnce(); expect(logicalRequestEntity.getTopologyHostRequestEntities()).andReturn(reservedHostRequestEntities).atLeastOnce(); expect(blueprint.getHostGroup(eq("host_group_1"))).andReturn(hostGroup1).atLeastOnce(); - expect(clusterTopology.containsMasterComponent("host_group_1")).andReturn(false).atLeastOnce(); + expect(hostGroup1.containsMasterComponent()).andReturn(false).atLeastOnce(); replayAll(); @@ -598,7 +596,7 @@ private LogicalRequest createTopologyRequestByHostCount(int hostCount, String ho expect(logicalRequestEntity.getTopologyRequestEntity()).andReturn(topologyRequestEntity).anyTimes(); expect(logicalRequestEntity.getTopologyHostRequestEntities()).andReturn(hostRequests).anyTimes(); expect(blueprint.getHostGroup(eq(hostGroupEntity.getName()))).andReturn(hostGroup1).anyTimes(); - expect(clusterTopology.containsMasterComponent(hostGroupName)).andReturn(false).anyTimes(); + expect(hostGroup1.containsMasterComponent()).andReturn(false).anyTimes(); replayAll(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/PersistedStateImplTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/topology/PersistedStateImplTest.java deleted file mode 100644 index 7f6ee4b169a..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/PersistedStateImplTest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ambari.server.topology; - - -import static org.easymock.EasyMock.capture; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.expectLastCall; -import static org.easymock.EasyMock.newCapture; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.reset; -import static org.junit.Assert.assertEquals; - -import java.lang.reflect.Field; - -import org.apache.ambari.server.controller.internal.BaseClusterRequest; -import org.apache.ambari.server.controller.internal.ProvisionAction; -import org.apache.ambari.server.controller.internal.ProvisionClusterRequest; -import org.apache.ambari.server.orm.dao.TopologyRequestDAO; -import org.apache.ambari.server.orm.entities.TopologyRequestEntity; -import org.easymock.Capture; -import org.easymock.EasyMockRunner; -import org.easymock.Mock; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; - -import com.google.common.collect.ImmutableMap; - - -@RunWith(EasyMockRunner.class) -public class PersistedStateImplTest { - - private static final String CLUSTER_REQUEST = - "{'blueprint': 'bp', 'host_groups': [{'name': 'group','host_count': '1' }]}".replace('\'', '"'); - - private static final String BLUEPRINT_NAME = "bp"; - - @Mock - private TopologyRequestDAO topologyRequestDAO; - - @Mock - private BlueprintFactory blueprintFactory; - - @Mock - private Blueprint blueprint; - - @Mock - private ProvisionClusterRequest - request; - - private PersistedStateImpl persistedState; - - @Before - public void init() throws Exception { - expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes(); - expect(blueprint.getConfiguration()).andReturn(new Configuration()).anyTimes(); - expect(blueprintFactory.getBlueprint(BLUEPRINT_NAME)).andReturn(blueprint).anyTimes(); - - expect(request.getBlueprint()).andReturn(blueprint).anyTimes(); - expect(request.getRawRequestBody()).andReturn(CLUSTER_REQUEST).anyTimes(); - expect(request.getType()).andReturn(TopologyRequest.Type.PROVISION).anyTimes(); - expect(request.getConfiguration()).andReturn(new Configuration()).anyTimes(); - expect(request.getClusterId()).andReturn(1L).anyTimes(); - expect(request.getDescription()).andReturn("").anyTimes(); - expect(request.getProvisionAction()).andReturn(ProvisionAction.INSTALL_AND_START).anyTimes(); - HostGroupInfo hostGroupInfo = new HostGroupInfo("hostgroup1"); - 
hostGroupInfo.setConfiguration(new Configuration()); - expect(request.getHostGroupInfo()).andReturn(ImmutableMap.of("hostgroup1", hostGroupInfo)).anyTimes(); - - replay(blueprint, blueprintFactory, request); - - Field blueprintFactoryField = BaseClusterRequest.class.getDeclaredField("blueprintFactory"); - blueprintFactoryField.setAccessible(true); - blueprintFactoryField.set(null, blueprintFactory); - - persistedState = new PersistedStateImpl(); - Field topologyRequestDAOField = PersistedStateImpl.class.getDeclaredField("topologyRequestDAO"); - topologyRequestDAOField.setAccessible(true); - topologyRequestDAOField.set(persistedState, topologyRequestDAO); - } - - @After - public void tearDown() { - reset(topologyRequestDAO, blueprintFactory, blueprint, request); - } - - @Test - public void testPersistTopologyRequest_RawRequestIsSaved() throws Exception { - // Given - Capture entityCapture = newCapture(); - topologyRequestDAO.create(capture(entityCapture)); - expectLastCall().andAnswer(() -> { - entityCapture.getValue().setId(1L); - return null; - }); - replay(topologyRequestDAO); - - // When - persistedState.persistTopologyRequest(request); - - // Then - assertEquals(CLUSTER_REQUEST, entityCapture.getValue().getRawRequestBody()); - } - -} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java similarity index 87% rename from ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidatorTest.java rename to ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java index f7ce215ccd8..722f5f1e31c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java @@ -16,28 +16,21 @@ * limitations under the License. 
*/ -package org.apache.ambari.server.topology.validators; +package org.apache.ambari.server.topology; import static junit.framework.Assert.assertEquals; import static org.easymock.EasyMock.expect; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; -import java.util.stream.Stream; import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.state.PropertyInfo; -import org.apache.ambari.server.topology.Blueprint; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.Component; -import org.apache.ambari.server.topology.Configuration; -import org.apache.ambari.server.topology.HostGroup; -import org.apache.ambari.server.topology.HostGroupInfo; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.topology.ResolvedComponent; +import org.apache.ambari.server.topology.validators.RequiredPasswordValidator; import org.easymock.EasyMockRule; import org.easymock.EasyMockSupport; import org.easymock.Mock; @@ -81,6 +74,8 @@ public class RequiredPasswordValidatorTest extends EasyMockSupport { private static final Map hostGroups = new HashMap<>(); private static final Map hostGroupInfo = new HashMap<>(); + private static final Collection group1Components = new HashSet<>(); + private static final Collection group2Components = new HashSet<>(); private static final Collection service1Components = new HashSet<>(); private static final Collection service2Components = new HashSet<>(); private static final Collection service3Components = new HashSet<>(); @@ -124,6 +119,13 @@ public void setup() { hostGroups.put("group1", group1); hostGroups.put("group2", group2); + group1Components.add("component1"); + group1Components.add("component2"); + group1Components.add("component3"); + + group2Components.add("component1"); + group2Components.add("component4"); + service1Components.add("component1"); service1Components.add("component2"); service2Components.add("component3"); @@ -143,17 +145,21 @@ public void setup() { expect(blueprint.getHostGroups()).andReturn(hostGroups).anyTimes(); expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes(); expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes(); - expect(topology.getStack()).andReturn(stack).anyTimes(); - - expect(topology.getComponentsInHostGroup("group1")).andReturn(Stream.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component2")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component3")).serviceType("service2").buildPartial() - )).anyTimes(); - expect(topology.getComponentsInHostGroup("group2")).andReturn(Stream.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component4")).serviceType("service3").buildPartial() - )).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); + + expect(group1.getComponentNames()).andReturn(group1Components).anyTimes(); + expect(group2.getComponentNames()).andReturn(group2Components).anyTimes(); + expect(group1.getComponentNames("service1")).andReturn(Arrays.asList("component1", "component2")).anyTimes(); + expect(group1.getComponentNames("service2")).andReturn(Arrays.asList("component3")).anyTimes(); + 
expect(group1.getComponentNames("service3")).andReturn(Collections.emptySet()).anyTimes(); + expect(group2.getComponentNames("service1")).andReturn(Arrays.asList("component1")).anyTimes(); + expect(group2.getComponentNames("service2")).andReturn(Collections.emptySet()).anyTimes(); + expect(group2.getComponentNames("service3")).andReturn(Arrays.asList("component4")).anyTimes(); + + expect(stack.getServiceForComponent("component1")).andReturn("service1").anyTimes(); + expect(stack.getServiceForComponent("component2")).andReturn("service1").anyTimes(); + expect(stack.getServiceForComponent("component3")).andReturn("service2").anyTimes(); + expect(stack.getServiceForComponent("component4")).andReturn("service3").anyTimes(); expect(stack.getRequiredConfigurationProperties("service1", PropertyInfo.PropertyType.PASSWORD)).andReturn(service1RequiredPwdConfigs).anyTimes(); expect(stack.getRequiredConfigurationProperties("service2", PropertyInfo.PropertyType.PASSWORD)).andReturn(service2RequiredPwdConfigs).anyTimes(); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingFactoryTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingFactoryTest.java index 94c412d402d..f75e6c19d95 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingFactoryTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingFactoryTest.java @@ -39,9 +39,13 @@ public class SettingFactoryTest { */ @Test public void testGetSettingWithSetOfProperties() { - Setting setting = SettingFactory.getSetting(createSettingWithSetOfProperties()); - Set> propertyValues = setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS); + SettingFactory settingFactory = new SettingFactory(); + Map>> properties; + + Setting setting = settingFactory.getSetting(createSettingWithSetOfProperties()); + Set> propertyValues = setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS); assertEquals(propertyValues.size(), 1); + assertEquals(propertyValues.iterator().next().get(Setting.SETTING_NAME_RECOVERY_ENABLED), "true"); } @@ -50,9 +54,13 @@ public void testGetSettingWithSetOfProperties() { */ @Test public void testGetSettingWithoutSetOfProperties() { - Setting setting = SettingFactory.getSetting(createSettingWithoutSetOfProperties()); - Set> propertyValues = setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS); + SettingFactory settingFactory = new SettingFactory(); + Map>> properties; + + Setting setting = settingFactory.getSetting(createSettingWithoutSetOfProperties()); + Set> propertyValues = setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS); assertEquals(propertyValues.size(), 1); + assertEquals(propertyValues.iterator().next().get(Setting.SETTING_NAME_RECOVERY_ENABLED), "true"); } @@ -64,11 +72,13 @@ public void testGetSettingWithoutSetOfProperties() { * } * ] * } + * + * @return */ private Collection> createSettingWithSetOfProperties() { - Set> setting1 = new HashSet<>(); - Set> setting2 = new HashSet<>(); + Set> setting1 = new HashSet<>(); + Set> setting2 = new HashSet<>(); // Setting 1: Property1 HashMap setting1Properties1 = new HashMap<>(); @@ -107,6 +117,8 @@ private Collection> createSettingWithSetOfProperties() { * "recovery_enabled":"true" * } * } + * + * @return */ private Collection> createSettingWithoutSetOfProperties() { // Setting 2: Property1 and Property2 @@ -118,7 +130,7 @@ private Collection> createSettingWithoutSetOfProperties() { setting2Properties2.put(Setting.SETTING_NAME_NAME, "TEZ"); 
setting2Properties2.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false"); - Set> setting2 = new HashSet<>(); + Set> setting2 = new HashSet<>(); setting2.add(setting2Properties1); setting2.add(setting2Properties2); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java index ecd5acbcd32..7ce3b38d13b 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/SettingTest.java @@ -19,129 +19,72 @@ package org.apache.ambari.server.topology; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import org.junit.BeforeClass; import org.junit.Test; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; - /** * Test the Setting class */ public class SettingTest { - - private static Setting setting; - private static Map>> properties; - private static Set> serviceSettings; - - @BeforeClass - public static void setup() { - properties = new HashMap<>(); - Set> recoverySettings = new HashSet<>(); - Set> deploymentSettings = new HashSet<>(); - Set> repositorySettings = new HashSet<>(); + /** + * Test get and set of entire setting. + */ + @Test + public void testGetProperties() { + Map>> properties = new HashMap<>(); + Set> setting1 = new HashSet<>(); + Set> setting2 = new HashSet<>(); + Set> setting3 = new HashSet<>(); + Set> setting4 = new HashSet<>(); // Setting 1: Property1 - Map setting1Properties1 = new HashMap<>(); + HashMap setting1Properties1 = new HashMap<>(); setting1Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "true"); - recoverySettings.add(setting1Properties1); + setting1.add(setting1Properties1); // Setting 2: Property1 and Property2 - Map hdfs = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "HDFS", - Setting.SETTING_NAME_RECOVERY_ENABLED, "false" - ); - - Map yarn = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "YARN" - // no RECOVERY_ENABLED value for YARN - ); - - Map tez = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "TEZ", - Setting.SETTING_NAME_RECOVERY_ENABLED, "true" - ); - - serviceSettings = ImmutableSet.of(hdfs, yarn, tez); - - Map hdfsClient = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "HDFS_CLIENT", - Setting.SETTING_NAME_RECOVERY_ENABLED, "false" - ); - - Map namenode = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "NAMENODE", - Setting.SETTING_NAME_RECOVERY_ENABLED, "true" - ); - - Map datanode = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "DATANODE" - // no RECOVERY_ENABLED value - ); - - Map yarnClient = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "YARN_CLIENT", - Setting.SETTING_NAME_RECOVERY_ENABLED, "false" - ); - - Map resourceManager = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "RESOURCE_MANAGER", - Setting.SETTING_NAME_RECOVERY_ENABLED, "true" - ); - - Map nodeManager = ImmutableMap.of( - Setting.SETTING_NAME_NAME, "NODE_MANAGER" - // no RECOVERY_ENABLED value - ); - - Set> componentSettings = ImmutableSet.>builder() - .addAll(ImmutableSet.of(hdfsClient, namenode, datanode)) - .addAll(ImmutableSet.of(yarnClient, resourceManager, nodeManager)) - .build(); + HashMap setting2Properties1 = new HashMap<>(); + setting2Properties1.put(Setting.SETTING_NAME_NAME, "HDFS"); + 
setting2Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false"); + + HashMap<String, String> setting2Properties2 = new HashMap<>(); + setting2Properties2.put(Setting.SETTING_NAME_NAME, "TEZ"); + setting2Properties2.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false"); + + setting2.add(setting2Properties1); + setting2.add(setting2Properties2); //Setting 3: Property 1 - Map<String, String> setting3Properties1 = new HashMap<>(); - setting3Properties1.put(Setting.SETTING_NAME_SKIP_FAILURE, "true"); - deploymentSettings.add(setting3Properties1); + HashMap<String, String> setting3Properties1 = new HashMap<>(); + setting3Properties1.put(Setting.SETTING_NAME_SKIP_FAILURE, "true"); + setting3.add(setting3Properties1); //Setting 4: Property 1 and 2 - Map<String, String> setting4Properties1 = new HashMap<>(); + HashMap<String, String> setting4Properties1 = new HashMap<>(); setting4Properties1.put(RepositorySetting.OVERRIDE_STRATEGY, RepositorySetting.OVERRIDE_STRATEGY_ALWAYS_APPLY); setting4Properties1.put(RepositorySetting.OPERATING_SYSTEM, "redhat7"); setting4Properties1.put(RepositorySetting.REPO_ID, "HDP"); setting4Properties1.put(RepositorySetting.BASE_URL, "http://localhost/repo"); - repositorySettings.add(setting4Properties1); + setting4.add(setting4Properties1); - Map<String, String> setting4Properties2 = new HashMap<>(); + HashMap<String, String> setting4Properties2 = new HashMap<>(); setting4Properties2.put(RepositorySetting.OVERRIDE_STRATEGY, RepositorySetting.OVERRIDE_STRATEGY_ALWAYS_APPLY); setting4Properties2.put(RepositorySetting.OPERATING_SYSTEM, "redhat7"); setting4Properties2.put(RepositorySetting.REPO_ID, "HDP-UTIL"); setting4Properties2.put(RepositorySetting.BASE_URL, "http://localhost/repo"); - repositorySettings.add(setting4Properties2); - - properties.put(Setting.SETTING_NAME_RECOVERY_SETTINGS, recoverySettings); - properties.put(Setting.SETTING_NAME_SERVICE_SETTINGS, serviceSettings); - properties.put(Setting.SETTING_NAME_COMPONENT_SETTINGS, componentSettings); - properties.put(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS, deploymentSettings); - properties.put(Setting.SETTING_NAME_REPOSITORY_SETTINGS, repositorySettings); + setting4.add(setting4Properties2); - setting = new Setting(properties); - } + properties.put(Setting.SETTING_NAME_RECOVERY_SETTINGS, setting1); + properties.put(Setting.SETTING_NAME_SERVICE_SETTINGS, setting2); + properties.put(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS, setting3); + properties.put(Setting.SETTING_NAME_REPOSITORY_SETTINGS, setting4); - /** - * Test get and set of entire setting.
- */ - @Test - public void testGetProperties() { + Setting setting = new Setting(properties); assertEquals(properties, setting.getProperties()); } @@ -150,47 +93,31 @@ public void testGetProperties() { */ @Test public void testGetSettingProperties() { - assertEquals(serviceSettings, setting.getSettingValue(Setting.SETTING_NAME_SERVICE_SETTINGS)); - } + Map>> properties = new HashMap<>(); + Set> setting1 = new HashSet<>(); + Set> setting2 = new HashSet<>(); - @Test - public void recoveryEnabledAtComponentLevel() { - assertEquals("false", setting.getRecoveryEnabled("HDFS", "HDFS_CLIENT")); - assertEquals("true", setting.getRecoveryEnabled("HDFS", "NAMENODE")); - assertEquals("false", setting.getRecoveryEnabled("YARN", "YARN_CLIENT")); - assertEquals("true", setting.getRecoveryEnabled("YARN", "RESOURCE_MANAGER")); - } + // Setting 1: Property1 + HashMap setting1Properties1 = new HashMap<>(); + setting1Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "true"); + setting1.add(setting1Properties1); - @Test - public void recoveryEnabledAtServiceLevel() { - assertEquals("true", setting.getRecoveryEnabled("TEZ", "TEZ_CLIENT")); - assertEquals("false", setting.getRecoveryEnabled("HDFS", "DATANODE")); - } + // Setting 2: Property1 and Property2 + HashMap setting2Properties1 = new HashMap<>(); + setting2Properties1.put(Setting.SETTING_NAME_NAME, "HDFS"); + setting2Properties1.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false"); - @Test - public void recoveryEnabledAtClusterLevel() { - assertEquals("true", setting.getRecoveryEnabled("OOZIE", "OOZIE_SERVER")); - assertEquals("true", setting.getRecoveryEnabled("YARN", "NODE_MANAGER")); - } + HashMap setting2Properties2 = new HashMap<>(); + setting2Properties2.put(Setting.SETTING_NAME_NAME, "TEZ"); + setting2Properties2.put(Setting.SETTING_NAME_RECOVERY_ENABLED, "false"); - @Test - public void testAutoSkipFailureEnabled() { - Map skipFailureSetting = ImmutableMap.of(Setting.SETTING_NAME_SKIP_FAILURE, "true"); - Setting setting = new Setting(ImmutableMap.of(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS, Collections.singleton(skipFailureSetting))); - assertTrue(setting.shouldSkipFailure()); - } + setting2.add(setting2Properties1); + setting2.add(setting2Properties2); - @Test - public void testAutoSkipFailureDisabled() { - Map skipFailureSetting = ImmutableMap.of(Setting.SETTING_NAME_SKIP_FAILURE, "false"); - Setting setting = new Setting(ImmutableMap.of(Setting.SETTING_NAME_DEPLOYMENT_SETTINGS, Collections.singleton(skipFailureSetting))); - assertFalse(setting.shouldSkipFailure()); - } + properties.put(Setting.SETTING_NAME_RECOVERY_SETTINGS, setting1); + properties.put(Setting.SETTING_NAME_SERVICE_SETTINGS, setting2); - @Test - public void testAutoSkipFailureUnspecified() { - Setting setting = new Setting(ImmutableMap.of()); - assertFalse(setting.shouldSkipFailure()); + Setting setting = new Setting(properties); + assertEquals(setting2, setting.getSettingValue(Setting.SETTING_NAME_SERVICE_SETTINGS)); } - } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/StackComponentResolverTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/StackComponentResolverTest.java deleted file mode 100644 index 6235aae4666..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/StackComponentResolverTest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.topology; - -import static org.easymock.EasyMock.createNiceMock; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.reset; - -import java.util.stream.Stream; - -import org.apache.ambari.server.controller.internal.StackDefinition; -import org.apache.ambari.server.topology.validators.RejectUnknownComponents; -import org.apache.ambari.server.topology.validators.TopologyValidator; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - -public class StackComponentResolverTest { - - private final TopologyValidator validator = new RejectUnknownComponents(); - private final ClusterTopology topology = createNiceMock(ClusterTopology.class); - private final StackDefinition stack = createNiceMock(StackDefinition.class); - - @Before - public void setUp() { - expect(topology.getStack()).andReturn(stack).anyTimes(); - } - - @After - public void tearDown() { - reset(topology, stack); - } - - @Test - public void acceptsKnownComponents() throws Exception { - // GIVEN - componentsInTopologyAre("VALID_COMPONENT", "ANOTHER_COMPONENT"); - validComponentsAre("VALID_COMPONENT", "ANOTHER_COMPONENT", "ONE_MORE_COMPONENT"); - - // WHEN - validator.validate(topology); - - // THEN - // no exception expected - } - - @Test(expected = InvalidTopologyException.class) - public void rejectsUnknownComponents() throws Exception { - // GIVEN - componentsInTopologyAre("VALID_COMPONENT", "UNKNOWN_COMPONENT"); - validComponentsAre("VALID_COMPONENT", "ANOTHER_COMPONENT"); - - // WHEN - validator.validate(topology); - } - - private void componentsInTopologyAre(String... components) { - expect(topology.getComponents()).andReturn(Stream.of(components) - .map(name -> ResolvedComponent.builder(new Component(name)).buildPartial()) - ).anyTimes(); - replay(topology); - } - - private void validComponentsAre(String... 
components) { - expect(stack.getComponents()).andReturn(ImmutableSet.builder().add(components).build()).anyTimes(); - replay(stack); - } - -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java index 89f2f4f7f62..7174a5e4f8e 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java @@ -18,10 +18,8 @@ package org.apache.ambari.server.topology; -import static java.util.stream.Collectors.toSet; import static org.easymock.EasyMock.anyLong; import static org.easymock.EasyMock.anyObject; -import static org.easymock.EasyMock.anyString; import static org.easymock.EasyMock.capture; import static org.easymock.EasyMock.eq; import static org.easymock.EasyMock.expect; @@ -43,11 +41,9 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import java.util.stream.Stream; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.actionmanager.HostRoleStatus; @@ -73,7 +69,6 @@ import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile; import org.apache.ambari.server.topology.tasks.ConfigureClusterTask; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.TopologyValidator; import org.apache.ambari.server.topology.validators.TopologyValidatorService; import org.easymock.Capture; import org.easymock.EasyMock; @@ -92,7 +87,6 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; /** @@ -115,9 +109,6 @@ public class TopologyManagerTest { @Rule public EasyMockRule mocks = new EasyMockRule(this); - @Mock - private org.apache.ambari.server.configuration.Configuration configuration; - @TestSubject private TopologyManager topologyManager = new TopologyManager(); @@ -179,9 +170,6 @@ public class TopologyManagerTest { @Mock private TopologyValidatorService topologyValidatorService; - @Mock - private ComponentResolver componentResolver; - private final Configuration stackConfig = new Configuration(new HashMap<>(), new HashMap<>()); private final Configuration bpConfiguration = new Configuration(new HashMap<>(), @@ -202,10 +190,8 @@ public class TopologyManagerTest { private HostGroupInfo group2Info = new HostGroupInfo("group2"); private Map groupInfoMap = new HashMap<>(); - private final Collection group1Components = Arrays.asList(new Component("component1"), new Component("component2"), new Component("component3")); - private final Collection group1ComponentNames = group1Components.stream().map(Component::getName).collect(toSet()); - private final Collection group2Components = Arrays.asList(new Component("component3"), new Component("component4")); - private final Collection group2ComponentNames = group2Components.stream().map(Component::getName).collect(toSet()); + private Collection group1Components = Arrays.asList(new Component("component1"), new Component("component2"), new Component("component3")); + private Collection group2Components = Arrays.asList(new Component("component3"), new Component("component4")); private Map> 
group1ServiceComponents = new HashMap<>(); private Map> group2ServiceComponents = new HashMap<>(); @@ -223,11 +209,6 @@ public class TopologyManagerTest { @Before public void setup() throws Exception { - expect(configuration.getParallelTopologyTaskCreationThreadPoolSize()).andReturn(1).anyTimes(); - expect(configuration.getParallelStageExecution()).andReturn(false).anyTimes(); - expect(configuration.getGplLicenseAccepted()).andReturn(true).anyTimes(); - replay(configuration); - clusterTopologyCapture = newCapture(); configRequestPropertiesCapture = newCapture(); configRequestPropertiesCapture2 = newCapture(); @@ -266,24 +247,21 @@ public void setup() throws Exception { expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes(); expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes(); - expect(clusterTopologyMock.getComponents()).andReturn(Stream.of( - ResolvedComponent.builder(new Component("component1")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component2")).serviceType("service2").buildPartial(), - ResolvedComponent.builder(new Component("component3")).serviceType("service1").buildPartial(), - ResolvedComponent.builder(new Component("component4")).serviceType("service2").buildPartial() - )).anyTimes(); + expect(blueprint.getComponentNames("service1")).andReturn(Arrays.asList("component1", "component3")).anyTimes(); + expect(blueprint.getComponentNames("service2")).andReturn(Arrays.asList("component2", "component4")).anyTimes(); expect(blueprint.getConfiguration()).andReturn(bpConfiguration).anyTimes(); expect(blueprint.getHostGroups()).andReturn(groupMap).anyTimes(); expect(blueprint.getHostGroupsForComponent("component1")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component2")).andReturn(Collections.singleton(group1)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component3")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getHostGroupsForComponent("component4")).andReturn(Collections.singleton(group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service1")).andReturn(Arrays.asList(group1, group2)).anyTimes(); + expect(blueprint.getHostGroupsForService("service2")).andReturn(Arrays.asList(group1, group2)).anyTimes(); expect(blueprint.getName()).andReturn(BLUEPRINT_NAME).anyTimes(); - expect(clusterTopologyMock.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); - expect(clusterTopologyMock.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(blueprint.getStackIds()).andReturn(ImmutableSet.of(STACK_ID)).anyTimes(); - expect(blueprint.getSecurity()).andReturn(SecurityConfiguration.NONE).anyTimes(); - expect(blueprint.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); + expect(blueprint.getRepositorySettings()).andReturn(new ArrayList<>()).anyTimes(); // don't expect toEntity() expect(stack.getAllConfigurationTypes("service1")).andReturn(Arrays.asList("service1-site", "service1-env")).anyTimes(); @@ -300,17 +278,12 @@ public void setup() throws Exception { expect(stack.getComponents("service1")).andReturn(components1).anyTimes(); expect(stack.getComponents("service2")).andReturn(components2).anyTimes(); expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes(); - 
expect(stack.getDependenciesForComponent(anyString())).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes(); expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes(); expect(stack.getName()).andReturn(STACK_NAME).anyTimes(); expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes(); expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes(); expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes(); - expect(stack.getServiceForComponent("component1")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component2")).andReturn("service2").anyTimes(); - expect(stack.getServiceForComponent("component3")).andReturn("service1").anyTimes(); - expect(stack.getServiceForComponent("component4")).andReturn("service2").anyTimes(); expect(request.getBlueprint()).andReturn(blueprint).anyTimes(); expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes(); @@ -320,24 +293,29 @@ public void setup() throws Exception { expect(request.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes(); expect(request.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes(); expect(request.getSecurityConfiguration()).andReturn(null).anyTimes(); - expect(request.getStackIds()).andReturn(ImmutableSet.of()).anyTimes(); - expect(request.getMpacks()).andReturn(ImmutableSet.of()).anyTimes(); - expect(componentResolver.resolveComponents(anyObject())).andReturn(ImmutableMap.of()).anyTimes(); + expect(group1.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group1.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(clusterTopologyMock.containsMasterComponent("group1")).andReturn(true).anyTimes(); + expect(group1.containsMasterComponent()).andReturn(true).anyTimes(); expect(group1.getComponents()).andReturn(group1Components).anyTimes(); - expect(group1.getComponentNames()).andReturn(group1ComponentNames).anyTimes(); + expect(group1.getComponentNames("service1")).andReturn(group1ServiceComponents.get("service1")).anyTimes(); + expect(group1.getComponentNames("service2")).andReturn(group1ServiceComponents.get("service2")).anyTimes(); expect(group1.getConfiguration()).andReturn(topoGroup1Config).anyTimes(); expect(group1.getName()).andReturn("group1").anyTimes(); + expect(group1.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group1.getStack()).andReturn(stack).anyTimes(); + expect(group2.getBlueprintName()).andReturn(BLUEPRINT_NAME).anyTimes(); expect(group2.getCardinality()).andReturn("test cardinality").anyTimes(); - expect(clusterTopologyMock.containsMasterComponent("group2")).andReturn(false).anyTimes(); + expect(group2.containsMasterComponent()).andReturn(false).anyTimes(); expect(group2.getComponents()).andReturn(group2Components).anyTimes(); - expect(group2.getComponentNames()).andReturn(group2ComponentNames).anyTimes(); + expect(group2.getComponentNames("service1")).andReturn(group2ServiceComponents.get("service1")).anyTimes(); + expect(group2.getComponentNames("service2")).andReturn(group2ServiceComponents.get("service2")).anyTimes(); expect(group2.getConfiguration()).andReturn(topoGroup2Config).anyTimes(); expect(group2.getName()).andReturn("group2").anyTimes(); + expect(group2.getServices()).andReturn(Arrays.asList("service1", "service2")).anyTimes(); + expect(group2.getStack()).andReturn(stack).anyTimes();
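(Aside: the group*ServiceComponents maps read by the expectations above are plain service-to-component-name lookups. The sketch below shows how such a fixture is typically populated before replay(); the generic parameters are an assumption inferred from usage, since this rendering of the patch has lost the contents of angle brackets.)

```java
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

final class ServiceComponentsFixtureSketch {
  public static void main(String[] args) {
    // service name -> component names hosted by group1 (values are illustrative,
    // matching the blueprint expectations in this test class)
    Map<String, Collection<String>> group1ServiceComponents = new HashMap<>();
    group1ServiceComponents.put("service1", Arrays.asList("component1", "component3"));
    group1ServiceComponents.put("service2", Arrays.asList("component2"));

    // The EasyMock wiring then hands one collection out per service, e.g.:
    //   expect(group1.getComponentNames("service1"))
    //       .andReturn(group1ServiceComponents.get("service1")).anyTimes();
    System.out.println(group1ServiceComponents.get("service2"));  // [component2]
  }
}
```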
expect(logicalRequestFactory.createRequest(eq(1L), (TopologyRequest) anyObject(), capture(clusterTopologyCapture))). @@ -347,10 +325,9 @@ public void setup() throws Exception { expect(logicalRequest.getReservedHosts()).andReturn(Collections.singleton("host1")).anyTimes(); expect(logicalRequest.getRequestStatus()).andReturn(requestStatusResponse).anyTimes(); - expect(ambariContext.composeStacks(anyObject())).andReturn(stack).anyTimes(); expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes(); //todo: don't ignore param - ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), eq(SecurityType.NONE), isNull(), anyLong()); + ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) isNull(), anyLong()); expectLastCall().anyTimes(); expect(ambariContext.getNextRequestId()).andReturn(1L).anyTimes(); expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes(); @@ -400,12 +377,12 @@ public void setup() throws Exception { @After public void tearDown() { PowerMock.verify(System.class); - verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, componentResolver, + verify(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest, configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor, persistedState, clusterTopologyMock, mockFuture, settingDAO); PowerMock.reset(System.class); - reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, componentResolver, + reset(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, logicalRequest, configurationRequest, configurationRequest2, configurationRequest3, requestStatusResponse, executor, persistedState, clusterTopologyMock, mockFuture, settingDAO); } @@ -561,7 +538,7 @@ private void requestFinished() { } private void replayAll() { - replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, componentResolver, + replay(blueprint, stack, request, group1, group2, ambariContext, logicalRequestFactory, configurationRequest, configurationRequest2, configurationRequest3, executor, persistedState, clusterTopologyMock, securityConfigurationFactory, credentialStoreService, clusterController, resourceProvider, mockFuture, requestStatusResponse, logicalRequest, settingDAO, @@ -570,7 +547,7 @@ private void replayAll() { @Test(expected = InvalidTopologyException.class) public void testScaleHosts__alreadyExistingHost() throws InvalidTopologyTemplateException, InvalidTopologyException, AmbariException, NoSuchStackException { - Set> propertySet = new HashSet<>(); + HashSet> propertySet = new HashSet<>(); Map properties = new TreeMap<>(); properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, "host1"); properties.put(HostResourceProvider.HOST_GROUP_PROPERTY_ID, "group1"); @@ -584,7 +561,7 @@ public void testScaleHosts__alreadyExistingHost() throws InvalidTopologyTemplate expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes(); replayAll(); topologyManager.provisionCluster(request); - topologyManager.scaleHosts(new ScaleClusterRequest("{}", propertySet)); + topologyManager.scaleHosts(new ScaleClusterRequest(propertySet)); Assert.fail("InvalidTopologyException should have been thrown"); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyRequestUtilTest.java 
b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyRequestUtilTest.java deleted file mode 100644 index db4046ac277..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyRequestUtilTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ambari.server.topology; - -import static org.junit.Assert.assertEquals; - -import java.util.Collections; - -import org.apache.ambari.server.state.StackId; -import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - -public class TopologyRequestUtilTest { - - private static final String REQUEST_WITH_MPACK_INSTANCES = - "{ 'mpack_instances' : [ {'name': 'HDPCORE', 'version': '1.0.0-b98'}, {'name': 'EDW', 'version': '1.0.0'} ] }".replace('\'', '"'); - - private static final String REQUEST_WITH_INVALID_MPACK_INSTANCE = - "{ 'mpack_instances' : [ {'name': 'HDPCORE', 'version': '1.0.0-b98'}, {'name': 'EDW'} ] }".replace('\'', '"'); - - private static final String REQUEST_WITHOUT_MPACK_INSTANCE = "{}"; - - - @Test - public void testGetStackIdsFromRawRequest_normalCase() { - assertEquals( - ImmutableSet.of(new StackId("HDPCORE", "1.0.0-b98"), new StackId("EDW", "1.0.0")), - TopologyRequestUtil.getStackIdsFromRequest(REQUEST_WITH_MPACK_INSTANCES)); - } - - @Test - public void testGetStackIdsFromRawRequest_noMpackInstances() { - assertEquals( - Collections.emptySet(), - TopologyRequestUtil.getStackIdsFromRequest(REQUEST_WITHOUT_MPACK_INSTANCE)); - } - - @Test(expected = IllegalArgumentException.class) - public void testGetStackIdsFromRawRequest_wrongMpackInstance() { - TopologyRequestUtil.getStackIdsFromRequest(REQUEST_WITH_INVALID_MPACK_INSTANCE); - } - -} \ No newline at end of file diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java index ae144266885..c2fea1d43e0 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java @@ -63,7 +63,7 @@ public void before() { EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(clusterConfigurationMock).anyTimes(); EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes(); - EasyMock.expect(clusterTopologyMock.getStack()).andReturn(stackMock).anyTimes(); + EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes(); } @After @@ -94,7 +94,7 @@ public void testShouldValidationPassWhenAllConfigTypesAreValid() throws Exceptio 
clusterRequestConfigTypes = new HashSet<>(Arrays.asList("core-site", "yarn-site")); EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes(); - EasyMock.expect(clusterTopologyMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); + EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site")); EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site")); @@ -117,7 +117,7 @@ public void testShouldValidationFailWhenInvalidConfigGroupsSpecifiedInCCTemplate clusterRequestConfigTypes = new HashSet<>(Arrays.asList("oozie-site")); EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes(); - EasyMock.expect(clusterTopologyMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); + EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site")); EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site")); @@ -138,7 +138,7 @@ public void testShouldValidationFailWhenThereIsAnInvalidConfigGroupProvided() th clusterRequestConfigTypes = new HashSet<>(Arrays.asList("core-site", "yarn-site", "oozie-site")); EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes(); - EasyMock.expect(clusterTopologyMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); + EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS"))); EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site")); EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site")); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/GplPropertiesValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/GplPropertiesValidatorTest.java deleted file mode 100644 index 2cb1ef10ed4..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/GplPropertiesValidatorTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.ambari.server.topology.validators; - -import static org.apache.ambari.server.topology.validators.GplPropertiesValidator.CODEC_CLASSES_PROPERTY_NAME; -import static org.apache.ambari.server.topology.validators.GplPropertiesValidator.LZO_CODEC_CLASS; -import static org.apache.ambari.server.topology.validators.GplPropertiesValidator.LZO_CODEC_CLASS_PROPERTY_NAME; -import static org.easymock.EasyMock.createNiceMock; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.reset; -import static org.easymock.EasyMock.verify; - -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.GPLLicenseNotAcceptedException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.collect.ImmutableMap; - -public class GplPropertiesValidatorTest { - - private static final boolean ACCEPTED = true; - - private final org.apache.ambari.server.configuration.Configuration serverConfig = createNiceMock(org.apache.ambari.server.configuration.Configuration.class); - private final TopologyValidator validator = new GplPropertiesValidator(serverConfig); - - @Before - public void setup() { - reset(serverConfig); - } - - @After - public void tearDown() { - verify(serverConfig); - } - - @Test(expected = GPLLicenseNotAcceptedException.class) // THEN - public void rejectsCodecsPropertyWithLzoClassIfGplIsNotAccepted() throws Exception { - // GIVEN - gpl(!ACCEPTED); - ClusterTopology topology = TopologyValidatorTests.topologyWithProperties(ImmutableMap.of( - "core-site", ImmutableMap.of( - CODEC_CLASSES_PROPERTY_NAME, "OtherCodec, " + LZO_CODEC_CLASS - ) - )); - - // WHEN - validator.validate(topology); - } - - @Test(expected = GPLLicenseNotAcceptedException.class) // THEN - public void rejectsLzoCodecPropertyIfGplIsNotAccepted() throws Exception { - // GIVEN - gpl(!ACCEPTED); - ClusterTopology topology = TopologyValidatorTests.topologyWithProperties(ImmutableMap.of( - "core-site", ImmutableMap.of( - LZO_CODEC_CLASS_PROPERTY_NAME, LZO_CODEC_CLASS - ) - )); - - // WHEN - validator.validate(topology); - } - - @Test - public void allowsLzoCodecIfGplIsAccepted() throws Exception { - // GIVEN - gpl(ACCEPTED); - ClusterTopology topology = TopologyValidatorTests.topologyWithProperties(ImmutableMap.of( - "core-site", ImmutableMap.of( - LZO_CODEC_CLASS_PROPERTY_NAME, LZO_CODEC_CLASS, - CODEC_CLASSES_PROPERTY_NAME, "OtherCodec," + LZO_CODEC_CLASS - ) - )); - - // WHEN - validator.validate(topology); - - // THEN - // no exception expected - } - - @Test - public void allowsConfigWithoutReferenceToGplEvenIfGplIsNotAccepted() throws Exception { - // GIVEN - gpl(!ACCEPTED); - ClusterTopology topology = TopologyValidatorTests.topologyWithProperties(ImmutableMap.of( - "core-site", ImmutableMap.of( - "fs.defaultFS", "hdfs://localhost:8020", - "io.compression.codecs", "org.apache.hadoop.io.compress.DefaultCodec" - ) - )); - - // WHEN - validator.validate(topology); - - // THEN - // no exception expected - } - - private void gpl(boolean accepted) { - expect(serverConfig.getGplLicenseAccepted()).andReturn(accepted).atLeastOnce(); - replay(serverConfig); - } -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java index 805edaddf5b..3ce0a024813 100644 --- 
a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java @@ -14,16 +14,15 @@ package org.apache.ambari.server.topology.validators; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; - import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import org.apache.ambari.server.topology.Blueprint; import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.Component; import org.apache.ambari.server.topology.Configuration; import org.apache.ambari.server.topology.InvalidTopologyException; -import org.apache.ambari.server.topology.ResolvedComponent; +import org.easymock.EasyMock; import org.easymock.EasyMockRule; import org.easymock.EasyMockSupport; import org.easymock.Mock; @@ -33,25 +32,27 @@ import org.junit.Rule; import org.junit.Test; -import com.google.common.collect.ImmutableSet; - public class HiveServiceValidatorTest extends EasyMockSupport { @Rule public EasyMockRule mocks = new EasyMockRule(this); @Mock - private ClusterTopology topology; + private ClusterTopology clusterTopologyMock; + + @Mock + private Blueprint blueprintMock; @Mock - private Configuration configuration; + private Configuration configurationMock; @TestSubject - private final TopologyValidator hiveServiceValidator = new HiveServiceValidator(); + private HiveServiceValidator hiveServiceValidator = new HiveServiceValidator(); @Before public void setUp() throws Exception { - expect(topology.getConfiguration()).andReturn(configuration).anyTimes(); + + } @After @@ -60,90 +61,101 @@ public void tearDown() throws Exception { } @Test - public void allowsTopologyWithoutHive() throws Exception { + public void testShouldValidationPassWhenHiveServiceIsNotInBlueprint() throws Exception { + // GIVEN - noHiveInTopology(); - noHiveConfig(); + EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock); + EasyMock.expect(blueprintMock.getServices()).andReturn(Collections.emptySet()); + replayAll(); // WHEN - hiveServiceValidator.validate(topology); + hiveServiceValidator.validate(clusterTopologyMock); // THEN - // OK + } - @Test - public void allowsExistingDatabaseWithoutMysqlComponent() throws Exception { + @Test(expected = InvalidTopologyException.class) + public void testShouldValidationFailWhenHiveServiceIsMissingConfigType() throws Exception { + // GIVEN - topologyHasMysql(false); - hiveDatabaseIs("Existing MySQL"); + Collection blueprintServices = Arrays.asList("HIVE", "OOZIE"); + EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock); + EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices); + EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock); + EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(Collections.emptySet()); + + replayAll(); // WHEN - hiveServiceValidator.validate(topology); + hiveServiceValidator.validate(clusterTopologyMock); // THEN - // OK + } @Test - public void allowsNewMysqlDatabaseWithMysqlComponent() throws Exception { + public void testShouldValidationPassWhenCustomHiveDatabaseSettingsProvided() throws Exception { + // GIVEN - topologyHasMysql(true); - hiveDatabaseIs("New MySQL Database"); + Collection blueprintServices = Arrays.asList("HIVE", "OOZIE"); + Collection configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env"); + 
EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock); + EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices); + EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock); + EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes); + + EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("PSQL"); + replayAll(); // WHEN - hiveServiceValidator.validate(topology); + hiveServiceValidator.validate(clusterTopologyMock); // THEN - // OK + } - @Test(expected = InvalidTopologyException.class) // THEN - public void rejectsHiveWithoutConfig() throws Exception { + @Test(expected = InvalidTopologyException.class) + public void testShouldValidationFailWhenDefaultsAreUsedAndMysqlComponentIsMissing() throws Exception { // GIVEN - topologyHasMysql(true); - noHiveConfig(); + Collection blueprintServices = Arrays.asList("HIVE", "HDFS"); + Collection configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env"); + EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes(); + EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes(); + EasyMock.expect(blueprintMock.getComponentNames("HIVE")).andReturn(Collections.emptyList()).anyTimes(); + EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock); + EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes); - // WHEN - hiveServiceValidator.validate(topology); - } + EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("New MySQL Database"); + replayAll(); - @Test(expected = InvalidTopologyException.class) // THEN - public void rejectsNewMysqlDatabaseWithoutMysqlComponent() throws Exception { - // GIVEN - topologyHasMysql(false); - hiveDatabaseIs("New MySQL Database"); // WHEN - hiveServiceValidator.validate(topology); - } + hiveServiceValidator.validate(clusterTopologyMock); - private void noHiveConfig() { - expect(configuration.getAllConfigTypes()).andReturn(Arrays.asList("core-site", "hadoop-env")).anyTimes(); - replay(configuration); - } + // THEN - private void hiveDatabaseIs(String database) { - expect(configuration.getAllConfigTypes()).andReturn(Arrays.asList("hive-env", "hive-site", "core-site", "hadoop-env")); - expect(configuration.getPropertyValue("hive-env", "hive_database")).andReturn(database).anyTimes(); - replay(configuration); - } + } - private void topologyHasMysql(boolean hasMysql) { - ImmutableSet.Builder components = ImmutableSet.builder().add("HIVE_CLIENT", "HIVE_METASTORE", "HIVE_SERVER"); - if (hasMysql) { - components.add("MYSQL_SERVER"); - } - expect(topology.getComponents()).andReturn(components.build().stream() - .map(name -> ResolvedComponent.builder(new Component(name)).serviceType("HIVE").buildPartial()) - ).anyTimes(); - expect(topology.getServices()).andReturn(ImmutableSet.of("HDFS", "YARN", "HIVE")).anyTimes(); - replay(topology); - } + @Test + public void testShouldValidationPassWhenDefaultsAreUsedAndMysqlComponentIsListed() throws Exception { + // GIVEN + Collection blueprintServices = Arrays.asList("HIVE", "HDFS", "MYSQL_SERVER"); + Collection hiveComponents = Arrays.asList("MYSQL_SERVER"); + Collection configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env"); + EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes(); + EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes(); + 
EasyMock.expect(blueprintMock.getComponentNames("HIVE")).andReturn(hiveComponents).anyTimes(); + EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock); + EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes); + + EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("New MySQL Database"); + replayAll(); - private void noHiveInTopology() { - expect(topology.getServices()).andReturn(ImmutableSet.of("HDFS", "YARN")).anyTimes(); - replay(topology); - } + // WHEN + hiveServiceValidator.validate(clusterTopologyMock); + // THEN + + } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RejectUnknownStacksTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RejectUnknownStacksTest.java deleted file mode 100644 index 48cd6718308..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RejectUnknownStacksTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
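Taken together, the rewritten HiveServiceValidatorTest cases above encode a single rule: a blueprint with HIVE needs a hive-env config type, and the default "New MySQL Database" choice additionally requires a MYSQL_SERVER component. A compact sketch of that rule, using the Blueprint and Configuration accessors the tests mock (a hypothetical helper, not the validator's source):

// Sketch of the rule the tests above exercise; accessor signatures assumed from the mocks.
static void validateHive(Blueprint blueprint, Configuration config) throws InvalidTopologyException {
  if (!blueprint.getServices().contains("HIVE")) {
    return; // no HIVE service, nothing to check
  }
  if (!config.getAllConfigTypes().contains("hive-env")) {
    throw new InvalidTopologyException("Missing config type [hive-env] for service HIVE");
  }
  String hiveDatabase = config.getPropertyValue("hive-env", "hive_database");
  if ("New MySQL Database".equals(hiveDatabase)
      && !blueprint.getComponentNames("HIVE").contains("MYSQL_SERVER")) {
    throw new InvalidTopologyException("Component [MYSQL_SERVER] is required for the default Hive database");
  }
}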
- */ -package org.apache.ambari.server.topology.validators; - -import static org.easymock.EasyMock.expect; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.state.StackId; -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.easymock.EasyMockSupport; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.collect.ImmutableSet; -import com.google.inject.util.Providers; - -public class RejectUnknownStacksTest extends EasyMockSupport { - - private final StackId validStackId = new StackId("valid", "1.0"); - private final StackId anotherValidStackId = new StackId("another", "2.1"); - private final StackId invalidStackId = new StackId("invalid", "3.2"); - private final StackId anotherInvalidStackId = new StackId("invalid", "1.1"); - private RejectUnknownStacks validator; - - @Before - public void setUp() { - AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class); - validator = new RejectUnknownStacks(Providers.of(metaInfo)); - - expect(metaInfo.isKnownStack(validStackId)).andReturn(true).anyTimes(); - expect(metaInfo.isKnownStack(anotherValidStackId)).andReturn(true).anyTimes(); - expect(metaInfo.isKnownStack(invalidStackId)).andReturn(false).anyTimes(); - expect(metaInfo.isKnownStack(anotherInvalidStackId)).andReturn(false).anyTimes(); - } - - @Test(expected = InvalidTopologyException.class) // THEN - public void rejectsUnknownStack() throws InvalidTopologyException { - // GIVEN - ClusterTopology topology = createNiceMock(ClusterTopology.class); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(validStackId, invalidStackId)).anyTimes(); - replayAll(); - - // WHEN - validator.validate(topology); - } - - @Test - public void acceptsKnownStack() throws InvalidTopologyException { - // GIVEN - ClusterTopology topology = createNiceMock(ClusterTopology.class); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(validStackId, anotherValidStackId)).anyTimes(); - replayAll(); - - // WHEN - validator.validate(topology); - - // THEN - // no exception - } - - @Test - public void reportsUnknownStacks() { - // GIVEN - ClusterTopology topology = createNiceMock(ClusterTopology.class); - expect(topology.getStackIds()).andReturn(ImmutableSet.of(invalidStackId, anotherInvalidStackId)).anyTimes(); - replayAll(); - - // WHEN - try { - validator.validate(topology); - fail("Expected " + InvalidTopologyException.class); - } catch (InvalidTopologyException e) { - // THEN - assertTrue(e.getMessage(), e.getMessage().contains(invalidStackId.toString())); - assertTrue(e.getMessage(), e.getMessage().contains(anotherInvalidStackId.toString())); - } - } -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java index e249feb8da6..780ca53bdd9 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java @@ -99,15 +99,17 @@ public void setup() { EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes(); EasyMock.expect(blueprintMock.getHostGroups()).andReturn(hostGroups); 
- EasyMock.expect(clusterTopologyMock.getServices()).andReturn(bpServices); - EasyMock.expect(clusterTopologyMock.getStack()).andReturn(stackMock).anyTimes(); + EasyMock.expect(blueprintMock.getServices()).andReturn(bpServices); + EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes(); EasyMock.expect(masterHostGroupMock.getName()).andReturn("master").anyTimes(); EasyMock.expect(masterHostGroupMock.getConfiguration()).andReturn(masterHostGroupConfigurationMock).anyTimes(); + EasyMock.expect(masterHostGroupMock.getServices()).andReturn(masterHostGroupServices); EasyMock.expect(slaveHostGroupMock.getName()).andReturn("slave").anyTimes(); EasyMock.expect(slaveHostGroupMock.getConfiguration()).andReturn(slaveHostGroupConfigurationMock).anyTimes(); + EasyMock.expect(slaveHostGroupMock.getServices()).andReturn(slaveHostGroupServices); // there are 2 hostgroups to be considered by the test hostGroups.put("master", masterHostGroupMock); diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/SecretReferenceValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/SecretReferenceValidatorTest.java deleted file mode 100644 index fa3021f047e..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/SecretReferenceValidatorTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
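The SecretReferenceValidatorTest removed in this hunk covers a simple contract: blueprint property values must not carry SECRET:type:version:name references, while plain values such as "secret" are fine. A sketch of such a scan, with the properties shape assumed from the deleted topologyWithProperties helper (config type mapped to property name/value pairs):

// Sketch only; not the validator's source.
static void rejectSecretReferences(Map<String, Map<String, String>> properties)
    throws InvalidTopologyException {
  for (Map.Entry<String, Map<String, String>> configType : properties.entrySet()) {
    for (Map.Entry<String, String> property : configType.getValue().entrySet()) {
      String value = property.getValue();
      if (value != null && value.startsWith("SECRET:")) {
        throw new InvalidTopologyException("Secret reference found at " + configType.getKey()
            + "/" + property.getKey() + "; secrets are not allowed in blueprints");
      }
    }
  }
}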
- */ -package org.apache.ambari.server.topology.validators; - -import static org.apache.ambari.server.topology.validators.TopologyValidatorTests.topologyWithProperties; - -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.InvalidTopologyException; -import org.junit.Test; - -import com.google.common.collect.ImmutableMap; - -public class SecretReferenceValidatorTest { - - private final TopologyValidator validator = new SecretReferenceValidator(); - - @Test - public void acceptsTopologyWithoutSecretReferences() throws InvalidTopologyException { - ClusterTopology topology = topologyWithProperties(ImmutableMap.of( - "hdfs-site", ImmutableMap.of( - "password", "secret" - ) - )); - - // WHEN - validator.validate(topology); - - // THEN - // no exceptions expected - } - - @Test(expected = InvalidTopologyException.class) - public void rejectsTopologyWithSecretReferences() throws InvalidTopologyException { - ClusterTopology topology = topologyWithProperties(ImmutableMap.of( - "hdfs-site", ImmutableMap.of( - "password", "SECRET:hdfs-site:1:test" - ) - )); - - // WHEN - validator.validate(topology); - } - -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java index a03290a2ff0..417c403e1fd 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java @@ -18,7 +18,8 @@ import java.util.Arrays; import java.util.Collections; -import java.util.List; +import java.util.HashSet; +import java.util.Set; import org.apache.ambari.server.controller.internal.Stack; import org.apache.ambari.server.topology.Blueprint; @@ -54,6 +55,8 @@ public class StackConfigTypeValidatorTest extends EasyMockSupport { @Mock private ClusterTopology clusterTopologyMock; + private Set clusterRequestConfigTypes; + @TestSubject private StackConfigTypeValidator stackConfigTypeValidator = new StackConfigTypeValidator(); @@ -62,7 +65,7 @@ public void before() { expect(clusterTopologyMock.getConfiguration()).andReturn(clusterConfigurationMock).anyTimes(); expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes(); - expect(clusterTopologyMock.getStack()).andReturn(stackMock).anyTimes(); + expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes(); } @After @@ -71,24 +74,30 @@ public void after() { } - @Test(expected = InvalidTopologyException.class) // THEN - public void rejectInvalidType() throws Exception { + @Test(expected = InvalidTopologyException.class) + public void testShouldValidationFailWhenUnknownConfigTypeComesIn() throws Exception { // GIVEN expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock); - expect(stackConfigurationMock.getAllConfigTypes()).andReturn(Arrays.asList("core-site", "yarn-site")); - expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(Collections.singleton("invalid-site")); + expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site"))); + expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site"))); + replayAll(); // WHEN stackConfigTypeValidator.validate(clusterTopologyMock); + + // THEN + // exception is thrown + } @Test - public void allowEmptyConfig() throws 
Exception { + public void testShouldValidationPassIfNoConfigTypesComeIn() throws Exception { // GIVEN expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock); - expect(stackConfigurationMock.getAllConfigTypes()).andReturn(Arrays.asList("core-site", "yarn-site")); - expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(Collections.emptySet()); + expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site"))); + expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Collections.emptyList())); + replayAll(); // WHEN @@ -96,27 +105,16 @@ public void allowEmptyConfig() throws Exception { // THEN // no exception is thrown - } - - @Test(expected = InvalidTopologyException.class) // THEN - public void rejectMultipleInvalidTypes() throws Exception { - // GIVEN - expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock); - expect(stackConfigurationMock.getAllConfigTypes()).andReturn(Arrays.asList("core-site", "yarn-site")); - expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(Arrays.asList("invalid-site-1", "invalid-default")); - replayAll(); - // WHEN - stackConfigTypeValidator.validate(clusterTopologyMock); } - @Test - public void allowValidTypes() throws InvalidTopologyException { + @Test(expected = InvalidTopologyException.class) + public void testShouldValidationFailIfMultipleInvalidConfigTypesComeIn() throws Exception { // GIVEN - List configTypes = Arrays.asList("core-site", "yarn-site"); expect(stackMock.getConfiguration()).andReturn(stackConfigurationMock); - expect(stackConfigurationMock.getAllConfigTypes()).andReturn(configTypes); - expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(configTypes); + expect(stackConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("core-site", "yarn-site"))); + expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(new HashSet<>(Arrays.asList("invalid-site-1", "invalid-default"))); + replayAll(); // WHEN @@ -124,5 +122,6 @@ public void allowValidTypes() throws InvalidTopologyException { // THEN // no exception is thrown + } } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/TopologyValidatorTests.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/TopologyValidatorTests.java deleted file mode 100644 index 2ae84412de1..00000000000 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/TopologyValidatorTests.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
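Both versions of the StackConfigTypeValidator tests above drive the same underlying check: the set of config types in the cluster request, minus the config types the stack knows about, must be empty. As a sketch under those assumptions (java.util.Set/HashSet imports assumed; not the validator's source):

// Illustrative helper mirroring the set-difference check the tests exercise.
static void validateConfigTypes(Set<String> requestTypes, Set<String> stackTypes)
    throws InvalidTopologyException {
  Set<String> unknown = new HashSet<>(requestTypes); // copy so the request set stays untouched
  unknown.removeAll(stackTypes);
  if (!unknown.isEmpty()) {
    throw new InvalidTopologyException("Unknown config types in the cluster request: " + unknown);
  }
}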
- */ -package org.apache.ambari.server.topology.validators; - -import static org.easymock.EasyMock.createNiceMock; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.ambari.server.topology.ClusterTopology; -import org.apache.ambari.server.topology.Configuration; - -/** - * Helper for TopologyValidator tests. - */ -public class TopologyValidatorTests { - - /** - * Creates a mock ClusterTopology with the given properties. - */ - static ClusterTopology topologyWithProperties(Map> properties) { - Configuration topologyConfig = new Configuration(properties, new HashMap<>()); - ClusterTopology topology = createNiceMock(ClusterTopology.class); - expect(topology.getConfiguration()).andReturn(topologyConfig).anyTimes(); - replay(topology); - return topology; - } - -} diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java index 926bbd335e2..015dc36ceb8 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java @@ -76,7 +76,7 @@ public void skipsValidatingIrrelevantProperty() throws Exception { public void setUp() throws Exception { expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes(); expect(clusterTopology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes(); - expect(clusterTopology.getStack()).andReturn(stack).anyTimes(); + expect(blueprint.getStack()).andReturn(stack).anyTimes(); expect(stack.getConfigurationPropertiesWithMetadata(SERVICE, CONFIG_TYPE)).andReturn(stackConfigWithMetadata).anyTimes(); } diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java index 18637d4505e..d43a712bf92 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java @@ -82,15 +82,9 @@ import org.apache.ambari.server.state.cluster.ClusterFactory; import org.apache.ambari.server.state.host.HostFactory; import org.apache.ambari.server.state.stack.OsFamily; -import org.apache.ambari.server.topology.ComponentResolver; -import org.apache.ambari.server.topology.DefaultStackFactory; import org.apache.ambari.server.topology.PersistedState; -import org.apache.ambari.server.topology.StackComponentResolver; -import org.apache.ambari.server.topology.StackFactory; import org.apache.ambari.server.topology.TopologyManager; import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory; -import org.apache.ambari.server.topology.validators.BasicBlueprintValidator; -import org.apache.ambari.server.topology.validators.BlueprintValidator; import org.codehaus.jackson.JsonGenerationException; import org.codehaus.jackson.map.JsonMappingException; import org.easymock.EasyMockSupport; @@ -141,9 +135,6 @@ protected void configure() { bind(MpackManagerFactory.class).toInstance(createNiceMock(MpackManagerFactory.class)); bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class)); bind(RootLevelSettingsManagerFactory.class).toInstance(createNiceMock(RootLevelSettingsManagerFactory.class)); - 
bind(ComponentResolver.class).to(StackComponentResolver.class); - bind(BlueprintValidator.class).to(BasicBlueprintValidator.class); - bind(StackFactory.class).to(DefaultStackFactory.class); install(new FactoryModuleBuilder().build(ExecutionCommandWrapperFactory.class)); install(new FactoryModuleBuilder().implement(Config.class, ConfigImpl.class).build(ConfigFactory.class)); install(new FactoryModuleBuilder().build(ConfigureClusterTaskFactory.class)); diff --git a/ambari-server/src/test/python/TestBootstrap.py b/ambari-server/src/test/python/TestBootstrap.py index 586ed018142..bea47f498cb 100644 --- a/ambari-server/src/test/python/TestBootstrap.py +++ b/ambari-server/src/test/python/TestBootstrap.py @@ -26,7 +26,7 @@ import pprint from ambari_commons.os_check import OSCheck -from bootstrap import PBootstrap, Bootstrap, BootstrapDefault, ValidateHost, SharedState, HostLog, SCP, SSH +from bootstrap import PBootstrap, Bootstrap, BootstrapDefault, SharedState, HostLog, SCP, SSH from unittest import TestCase from subprocess import Popen from bootstrap import AMBARI_PASSPHRASE_VAR_NAME @@ -817,43 +817,35 @@ def test_interruptBootstrap(self, write_mock, createDoneFile_mock): bootstrap_obj.interruptBootstrap() self.assertTrue(createDoneFile_mock.called) + + @patch("time.sleep") + @patch("time.time") + @patch("logging.warn") + @patch("logging.info") @patch.object(BootstrapDefault, "start") @patch.object(BootstrapDefault, "interruptBootstrap") @patch.object(BootstrapDefault, "getStatus") - def test_PBootstrap(self, getStatus_mock, interruptBootstrap_mock, start_mock): + def test_PBootstrap(self, getStatus_mock, interruptBootstrap_mock, start_mock, + info_mock, warn_mock, time_mock, sleep_mock): shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir", "setupAgentFile", "ambariServer", "centos6", None, "8440", "root") - self.help_test_PBootstrap(shared_state, getStatus_mock, interruptBootstrap_mock, start_mock) - - @patch.object(ValidateHost, "start") - @patch.object(ValidateHost, "interruptBootstrap") - @patch.object(ValidateHost, "getStatus") - def test_PBootstrapForHostValidation(self, getStatus_mock, interruptBootstrap_mock, start_mock): - shared_state = SharedState("root", "123", "sshkey_file", "scriptDir", "bootdir", - "setupAgentFile", "ambariServer", "centos6", - None, "8440", "root", True) - self.help_test_PBootstrap(shared_state, getStatus_mock, interruptBootstrap_mock, start_mock) - - @patch("time.sleep") - @patch("time.time") - def help_test_PBootstrap(self, shared_state, getStatus, interruptBootstrap, start, time_mock, sleep_mock): n = 180 - init_time = 100500 - time_mock.return_value = init_time + time = 100500 + time_mock.return_value = time hosts = [] for i in range(0, n): hosts.append("host" + str(i)) # Testing normal case - getStatus.return_value = {"return_code": 0, - "start_time": init_time + 999} + getStatus_mock.return_value = {"return_code": 0, + "start_time": time + 999} pbootstrap_obj = PBootstrap(hosts, shared_state) pbootstrap_obj.run() - self.assertEqual(start.call_count, n) - self.assertEqual(interruptBootstrap.call_count, 0) + self.assertEqual(start_mock.call_count, n) + self.assertEqual(interruptBootstrap_mock.call_count, 0) - start.reset_mock() - getStatus.reset_mock() + start_mock.reset_mock() + getStatus_mock.reset_mock() # Testing case of timeout def fake_return_code_generator(): call_number = 0 @@ -865,9 +857,8 @@ def fake_return_code_generator(): yield None def fake_start_time_generator(): - timeout = bootstrap.HOST_CONNECTIVITY_TIMEOUT if 
shared_state.validate else bootstrap.HOST_BOOTSTRAP_TIMEOUT while True: - yield init_time - timeout - 1 + yield time - bootstrap.HOST_BOOTSTRAP_TIMEOUT - 1 return_code_generator = fake_return_code_generator() start_time_generator = fake_start_time_generator() @@ -880,9 +871,9 @@ def status_get_item_mock(item): dict_mock = MagicMock() dict_mock.__getitem__.side_effect = status_get_item_mock - getStatus.return_value = dict_mock + getStatus_mock.return_value = dict_mock pbootstrap_obj.run() - self.assertEqual(start.call_count, n) - self.assertEqual(interruptBootstrap.call_count, n / 5) + self.assertEqual(start_mock.call_count, n) + self.assertEqual(interruptBootstrap_mock.call_count, n / 5) diff --git a/ambari-server/src/test/resources/mpacks-v2/upgrade-packs/upgrade-basic.xml b/ambari-server/src/test/resources/mpacks-v2/upgrade-packs/upgrade-basic.xml index e94853464e4..1ebb00faa44 100644 --- a/ambari-server/src/test/resources/mpacks-v2/upgrade-packs/upgrade-basic.xml +++ b/ambari-server/src/test/resources/mpacks-v2/upgrade-packs/upgrade-basic.xml @@ -22,26 +22,27 @@ false false rolling - - - - - - - - - - - - - - - HDP-2.3.0.0 - my-value - - + org.apache.ambari.server.checks.HiveMultipleMetastoreCheck + org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck + org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck + org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck + org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck + org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck + org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck + org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck + org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck + org.apache.ambari.server.checks.DruidHighAvailabilityCheck + org.apache.ambari.server.checks.LZOCheck + + + + + + HDP-2.3.0.0 + + @@ -150,7 +151,8 @@ - + + true KAFKA_BROKER diff --git a/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml index a43835d8c70..a63b6e74677 100644 --- a/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml +++ b/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml @@ -34,16 +34,6 @@ 600 - - SOME_CLIENT_FOR_SERVICE_CHECK - CLIENT - 0+ - - - PYTHON - 600 - - diff --git a/mpack-instance-manager/src/main/python/instance_manager/instance_manager.py b/mpack-instance-manager/src/main/python/instance_manager/instance_manager.py index 6f7587c6cb6..668223428f2 100644 --- a/mpack-instance-manager/src/main/python/instance_manager/instance_manager.py +++ b/mpack-instance-manager/src/main/python/instance_manager/instance_manager.py @@ -51,8 +51,8 @@ def create_mpack(mpack_name, mpack_version, mpack_instance, subgroup_name=DEFAUL OR list of 'components_instances_name' to be created. (default) OR '*' for all components """ - mpack_name, module_name, components, components_map = normalize_parameters( - mpack_name, module_name, components, components_map) + mpack_name = mpack_name.lower() + module_name = module_name.lower() validate_mpack_for_creation_or_changing(mpack_name, mpack_version, module_name, components, components_map) @@ -71,17 +71,14 @@ def set_mpack_instance(mpack, mpack_version, mpack_instance, subgroup_name=DEFAU OR list of 'components_instances_name' to be created. 
(default) OR '*' for all components """ - mpack, module_name, components, components_map = normalize_parameters( - mpack, module_name, components, components_map) + mpack = mpack.lower() + module_name = module_name.lower() instances = MpackInstance.parse_instances_with_filtering(os.path.join(ROOT_FOLDER_PATH, INSTANCES_FOLDER_NAME), mpack, mpack_instance, subgroup_name, module_name, components, components_map) if not instances: - raise ValueError("Found no instances for the given filters: mpack_name:{0}, instance_name:{1}," - " subgroup_name:{2}, module_name:{3}, components:{4}, components_map:{5}". - format(mpack, mpack_instance, subgroup_name, module_name, - components, components_map)) + raise ValueError("Found no instances for the given filters.") validate_mpack_for_creation_or_changing(mpack, mpack_version, module_name, components, components_map) @@ -145,8 +142,11 @@ def build_granular_json_with_filtering(mpack_name_filter, instance_name_filter, The output_conf_dir or output_path for each component instance will be included in json depending on given parameters. """ - mpack_name_filter, module_name_filter, skipped, components_name_filter_map = normalize_parameters( - mpack_name_filter, module_name_filter, [], components_name_filter_map) + + if mpack_name_filter: + mpack_name_filter = mpack_name_filter.lower() + if module_name_filter: + module_name_filter = module_name_filter.lower() instances = MpackInstance.parse_instances_with_filtering(os.path.join(ROOT_FOLDER_PATH, INSTANCES_FOLDER_NAME), mpack_name_filter, @@ -155,10 +155,7 @@ def build_granular_json_with_filtering(mpack_name_filter, instance_name_filter, None, components_name_filter_map) if not instances: - raise ValueError("Found no instances for the given filters: mpack_name:{0}, instance_name:{1}," - " subgroup_name:{2}, module_name:{3}, components_name_map:{4}". 
- format(mpack_name_filter, instance_name_filter, subgroup_name_filter, - module_name_filter, components_name_filter_map)) + raise ValueError("Found no instances for the given filters.") full_json_output = build_json_output(instances, output_conf_dir=output_conf_dir, output_path=output_path) @@ -232,22 +229,6 @@ def build_json_output(instances, output_conf_dir=False, output_path=False): return {'mpacks': result} -def normalize_parameters(mpack_name, module_name, components, components_map): - """ - normalize parameters to be lowercase, for components_map dictionary only keys are normalized - """ - if mpack_name: - mpack_name = mpack_name.lower() - if module_name: - module_name = module_name.lower() - if components and components != "*": - components = [component.lower() for component in components] - if components_map: - components_map = dict((k.lower(), v) for k, v in components_map.iteritems()) - - return mpack_name, module_name, components, components_map - - def validate_mpack_for_creation_or_changing(mpack_name, mpack_version, module_name, components, components_map): mpack_root_path = os.path.join(ROOT_FOLDER_PATH, MPACKS_FOLDER_NAME, mpack_name) if not os.path.exists(mpack_root_path): @@ -495,18 +476,16 @@ def parse_into_module_instance_dict(path, module_name_filter, components_filter, if ((not components_filter and not components_name_filter_map) or (components_filter and (components_filter == '*' or folder_name in components_filter)) or (components_name_filter_map and folder_name in components_name_filter_map)): - components_map = \ - {folder_name: - {DEFAULT_COMPONENT_INSTANCE_NAME: - ComponentInstance(name=DEFAULT_COMPONENT_INSTANCE_NAME, - component_path=os.path.join(path, folder_name), - path_exec=os.readlink(os.path.join(path, folder_name, CURRENT_SOFTLINK_NAME)))}} + components_map = ComponentInstance(name=DEFAULT_COMPONENT_INSTANCE_NAME, + component_path=os.path.join(path, folder_name), + path_exec=os.path.realpath( + os.path.join(path, folder_name, CURRENT_SOFTLINK_NAME))) else: components_map = ComponentInstance.parse_into_components_dict(os.path.join(path, folder_name), components_filter, components_name_filter_map) if components_map: - result[module_name] = ModuleInstance(module_name, components_map, module_category) + result[folder_name] = ModuleInstance(folder_name, components_map, module_category) return result @staticmethod @@ -532,19 +511,28 @@ def create_module_instance(mpack_name, mpack_version, mpack_instance, subgroup_n is_client_module) def set_new_version(self, mpack_name, mpack_version): - for component_type in self.components_map: - for component_name in self.components_map[component_type]: - component_instance = self.components_map[component_type][component_name] - print("\nSetting new version for component : " + component_instance.component_path) - component_instance.set_new_version(mpack_name, mpack_version, component_type) + if self.category == CLIENT_CATEGORY: + component_instance = self.components_map + print("\nSetting new version for component : " + component_instance.component_path) + component_instance.set_new_version(mpack_name, mpack_version, self.module_name) + else: + for component_type in self.components_map: + for component_name in self.components_map[component_type]: + component_instance = self.components_map[component_type][component_name] + print("\nSetting new version for component : " + component_instance.component_path) + component_instance.set_new_version(mpack_name, mpack_version, component_type) def build_json_output(self, 
output_conf_dir, output_path): result = {} - for component_type in self.components_map.keys(): - result[component_type] = build_json_output_from_instances_dict(self.components_map[component_type], - ComponentInstance.plural_name, - output_conf_dir, output_path) - result = {'components': result} + if self.category == CLIENT_CATEGORY: + result['component_instances'] = {'default': self.components_map.build_json_output(output_conf_dir, output_path)} + else: + for component_type in self.components_map.keys(): + result[component_type] = build_json_output_from_instances_dict(self.components_map[component_type], + ComponentInstance.plural_name, + output_conf_dir, output_path) + result = {'components': result} + result['category'] = self.category result['name'] = self.module_name return result @@ -578,7 +566,7 @@ def parse_into_component_instance_dict(path, component_names_filter=None): if not component_names_filter or component_instance_name in component_names_filter: result[component_instance_name] = ComponentInstance(name=component_instance_name, component_path=os.path.join(path, component_instance_name), - path_exec=os.readlink( + path_exec=os.path.realpath( os.path.join(path, component_instance_name, CURRENT_SOFTLINK_NAME))) return result diff --git a/mpack-instance-manager/src/main/python/instance_manager/mpack-instance-manager.py b/mpack-instance-manager/src/main/python/instance_manager/mpack-instance-manager.py index f9458b14420..38c29f827f1 100644 --- a/mpack-instance-manager/src/main/python/instance_manager/mpack-instance-manager.py +++ b/mpack-instance-manager/src/main/python/instance_manager/mpack-instance-manager.py @@ -56,14 +56,8 @@ def check_required_options(options, action, parser): if not options.module_name: missing_options.append("module-name") - if missing_options: - parser.error("Missing following required command options: {0}".format(missing_options)) - - if not options.components_map and not options.components: - parser.error("Either components or components-map option must be specified.") - - if options.components_map and options.components: - parser.error("Only components or components-map option could be specified. 
Can't use both.") + if missing_options: + parser.error("Missing following required command options: {0}".format(missing_options)) def init_create_parser_options(parser): @@ -123,11 +117,8 @@ def main(options, args): parsed_components = None parsed_components_map = None try: - if hasattr(options, 'components') and options.components: - if options.components == '*': - parsed_components = '*' - else: - parsed_components = ast.literal_eval(options.components) + if hasattr(options, 'components') and options.components and options.components != '*': + parsed_components = ast.literal_eval(options.components) if options.components_map: parsed_components_map = ast.literal_eval(options.components_map) except ValueError: diff --git a/mpack-instance-manager/src/test/python/instance_manager/test_instance_manager.py b/mpack-instance-manager/src/test/python/instance_manager/test_instance_manager.py index 0b2447cff65..53150941526 100644 --- a/mpack-instance-manager/src/test/python/instance_manager/test_instance_manager.py +++ b/mpack-instance-manager/src/test/python/instance_manager/test_instance_manager.py @@ -84,7 +84,7 @@ def test_create_mpack_server_module_with_default_component_instance(self): instance_manager.DEFAULT_MPACK_INSTANCE_NAME))) def test_create_mpack_server_module_with_multiple_component_instances(self): - create_mpack_with_defaults(components=None, components_map={SERVER_COMPONENT_NAME.upper(): ['server1', 'server2']}) + create_mpack_with_defaults(components=None, components_map={SERVER_COMPONENT_NAME: ['server1', 'server2']}) current_link_1 = os.path.join(TMP_ROOT_FOLDER, instance_manager.INSTANCES_FOLDER_NAME, MPACK_NAME, INSTANCE_NAME_1, SUBGROUP_NAME, SERVER_MODULE_NAME, SERVER_COMPONENT_NAME, @@ -121,12 +121,12 @@ def test_set_version_server_module_asterisk(self): MPACK_VERSION_2, SERVER_COMPONENT_NAME)) def test_set_version_client_module_asterisk(self): - create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME.upper()) + create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME) build_rpm_structure(mpack_version=MPACK_VERSION_2, remove_old_content=False, create_modules=False) - instance_manager.set_mpack_instance(MPACK_NAME.upper(), MPACK_VERSION_2, INSTANCE_NAME_1, SUBGROUP_NAME, - CLIENT_MODULE_NAME.upper(), '*') + instance_manager.set_mpack_instance(MPACK_NAME, MPACK_VERSION_2, INSTANCE_NAME_1, SUBGROUP_NAME, + CLIENT_MODULE_NAME, '*') current_link = os.path.join(TMP_ROOT_FOLDER, instance_manager.INSTANCES_FOLDER_NAME, MPACK_NAME, INSTANCE_NAME_1, SUBGROUP_NAME, CLIENT_COMPONENT_NAME, @@ -160,9 +160,9 @@ def test_set_version_for_one_of_two_component_instances(self): MPACK_VERSION_2, SERVER_COMPONENT_NAME)) def test_get_conf_dir_all(self): - create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME.upper()) - create_mpack_with_defaults(module_name=SERVER_MODULE_NAME.upper(), components=None, - components_map={SERVER_COMPONENT_NAME.upper(): ['server1']}) + create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME) + create_mpack_with_defaults(module_name=SERVER_MODULE_NAME, components=None, + components_map={SERVER_COMPONENT_NAME: ['server1']}) conf_dir_json = instance_manager.get_conf_dir() @@ -189,19 +189,15 @@ def test_get_conf_dir_all(self): } } }, - "hdfs-clients": { + "hdfs_client": { "category": "CLIENT", - "components": { - "hdfs_client": { - "component-instances": { - "default": { - "config_dir": "/tmp/instance_manager_test/instances/hdpcore/Production/default/hdfs_client/conf", - "name": "default" - } - } + "component_instances": { + "default": { + "config_dir": 
"/tmp/instance_manager_test/instances/hdpcore/Production/default/hdfs_client/conf", + "name": "default" } }, - "name": "hdfs-clients" + "name": "hdfs_client" } } } @@ -214,9 +210,9 @@ def test_get_conf_dir_all(self): self.assertEqual(conf_dir_json, expected_json) def test_list_instances_all(self): - create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME.upper()) - create_mpack_with_defaults(module_name=SERVER_MODULE_NAME.upper(), components=None, - components_map={SERVER_COMPONENT_NAME.upper(): ['server1']}) + create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME) + create_mpack_with_defaults(module_name=SERVER_MODULE_NAME, components=None, + components_map={SERVER_COMPONENT_NAME: ['server1']}) conf_dir_json = instance_manager.list_instances() @@ -236,26 +232,22 @@ def test_list_instances_all(self): "hdfs_server": { "component-instances": { "server1": { - "path": "/tmp/instance_manager_test/mpacks/hdpcore/1.0.0-b1/hdfs_server", + "path": "/tmp/instance_manager_test/modules/hdfs/3.1.0.0-b1", "name": "server1" } } } } }, - "hdfs-clients": { + "hdfs_client": { "category": "CLIENT", - "components": { - "hdfs_client": { - "component-instances": { - "default": { - "path": "/tmp/instance_manager_test/mpacks/hdpcore/1.0.0-b1/hdfs_client", - "name": "default" - } - } + "component_instances": { + "default": { + "path": "/tmp/instance_manager_test/modules/hdfs-clients/3.1.0.0-b1", + "name": "default" } }, - "name": "hdfs-clients" + "name": "hdfs_client" } } } @@ -269,7 +261,6 @@ def test_list_instances_all(self): def test_granularity(self): create_mpack_with_defaults() - create_mpack_with_defaults(module_name=CLIENT_MODULE_NAME) full_conf_dir_json = instance_manager.get_conf_dir() self.assertTrue('mpacks' in full_conf_dir_json) @@ -289,10 +280,6 @@ def test_granularity(self): subgroup_name=SUBGROUP_NAME, module_name=SERVER_MODULE_NAME) self.assertTrue('components' in module_conf_dir_json) - module_conf_dir_json = instance_manager.get_conf_dir(mpack=MPACK_NAME, mpack_instance=INSTANCE_NAME_1, - subgroup_name=SUBGROUP_NAME, module_name=CLIENT_MODULE_NAME) - self.assertTrue('components' in module_conf_dir_json) - # The mpack level filter not specified full_conf_dir_json = instance_manager.get_conf_dir(mpack_instance=INSTANCE_NAME_1, subgroup_name=SUBGROUP_NAME, module_name=SERVER_MODULE_NAME) @@ -330,7 +317,7 @@ def test_filtering(self): expected_filter_result = {'mpacks': {'edw': {'mpack-instances': {'eCommerce': {'name': 'eCommerce', 'subgroups': { 'default': {'modules': {'hdfs': {'category': 'SERVER', 'name': 'hdfs', 'components': {'hdfs_server': { 'component-instances': { - 'server2': {'path': '/tmp/instance_manager_test/mpacks/edw/1.0.0-b1/hdfs_server', 'name': 'server2'}}}}}}}}}}}}} + 'server2': {'path': '/tmp/instance_manager_test/modules/hdfs/3.1.0.0-b1', 'name': 'server2'}}}}}}}}}}}}} self.assertEquals(expected_filter_result, filter_by_component_instance_name_json) def test_validation(self): @@ -376,19 +363,6 @@ def test_creating_existing_component_instance(self): "The instance /tmp/instance_manager_test/instances/hdpcore/Production/default/hdfs/" "hdfs_server/default already exist. 
To change the version use set-mpack-instance command") - def test_normalize_parameters(self): - mpack_name = MPACK_NAME.upper() - module_name = SERVER_MODULE_NAME.upper() - components = [SERVER_COMPONENT_NAME.upper()] - components_map = {SERVER_COMPONENT_NAME.upper(): ["DEFAULT"]} - mpack_name, module_name, components, components_map = instance_manager.normalize_parameters( - mpack_name, module_name, components, components_map) - - self.assertEquals(mpack_name, MPACK_NAME.lower()) - self.assertEquals(module_name, SERVER_MODULE_NAME.lower()) - self.assertEquals(components, [SERVER_COMPONENT_NAME.lower()]) - self.assertEquals(components_map, {SERVER_COMPONENT_NAME.lower(): ["DEFAULT"]}) - def test_set_non_existing_instance(self): try: instance_manager.set_mpack_instance(mpack=MPACK_NAME, mpack_version=MPACK_VERSION_1, @@ -409,9 +383,7 @@ def test_set_non_existing_instance(self): raise AssertionError("The previous call should have thrown exception") except ValueError as e: self.assertEquals(e.message, - "Found no instances for the given filters: mpack_name:hdpcore, instance_name:Production," - " subgroup_name:default, module_name:hdfs, components:None," - " components_map:{'hdfs_server': ['non-existing-instance']}") + "Found no instances for the given filters.") def create_mpack_with_defaults(mpack_name=MPACK_NAME, mpack_version=MPACK_VERSION_1, mpack_instance=INSTANCE_NAME_1,