Merged
Changes from all commits (30 commits)
dba2ed3  Introduce CompositeStack (adoroszlai, Jan 21, 2018)
86e4653  Look up repo id for each stack (adoroszlai, Jan 22, 2018)
e92b160  Use stackId (adoroszlai, Jan 22, 2018)
10e2823  Do not assume all services are present in the stack (adoroszlai, Jan 22, 2018)
288bf20  Handle unknown config type gracefully (adoroszlai, Jan 22, 2018)
a919f9c  Test with AmbariInfra instead of HDS (adoroszlai, Jan 22, 2018)
c52c9da  Not all stacks have cluster-env (adoroszlai, Jan 22, 2018)
a1dcc4d  cluster-env does not belong to any service (adoroszlai, Jan 22, 2018)
28da1fd  Make sure default group (passed via user_group) is created (adoroszlai, Jan 22, 2018)
c122f04  Make sure smoke_user is created (adoroszlai, Jan 22, 2018)
4039ea7  Workaround for lack of hdp-select in some mpacks (adoroszlai, Jan 22, 2018)
f0b4e01  Stack advisor should work on non-HDP stack (adoroszlai, Jan 23, 2018)
3acfa74  Remove getStackId() from Blueprint (adoroszlai, Jan 23, 2018)
9a91e19  Cut BlueprintValidator instantiation from BlueprintImpl (will be requ… (adoroszlai, Jan 23, 2018)
80aa54f  After merge implement Stack.getServicesForConfigType() introduced ups… (adoroszlai, Jan 24, 2018)
2fe9258  Refactor Stack to not use AmbariManagementController (adoroszlai, Jan 24, 2018)
2c374bf  Move javadoc from Stack to the interface (adoroszlai, Jan 25, 2018)
039a526  Rename StackInfo interface to StackDefinition to avoid confusion with… (adoroszlai, Jan 25, 2018)
04f2810  More tests for Stack (adoroszlai, Jan 25, 2018)
0b5093f  Add getStackIds(), more javadoc (adoroszlai, Jan 25, 2018)
6e698a8  Get stackIds from BlueprintEntity if available (adoroszlai, Jan 25, 2018)
fcbdcbe  Get rid of static injection in BlueprintResourceProvider (adoroszlai, Jan 25, 2018)
98256a8  Factory method for StackDefinition (adoroszlai, Jan 25, 2018)
925e0cd  Some tests for CompositeStack based on test stack definitions (adoroszlai, Jan 25, 2018)
bfb3a13  AMBARI-22875. Blueprint cluster creation using manually installed mpacks (adoroszlai, Jan 29, 2018)
346d778  AMBARI-22875. Review: javadoc, Iterables (adoroszlai, Jan 30, 2018)
3b0703e  Revert "Make sure smoke_user is created" (adoroszlai, Jan 30, 2018)
126e527  AMBARI-22875. Use ObjectReader/ObjectWriter (adoroszlai, Jan 31, 2018)
45dad5b  AMBARI-22875. Check services are each defined in a single stack (adoroszlai, Jan 31, 2018)
a4b3ac9  AMBARI-22875. Check components are each defined in a single stack (adoroszlai, Jan 31, 2018)
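
The commit series above introduces a CompositeStack built on a StackDefinition interface, so a blueprint can reference several stacks (manually installed mpacks) at once. The fragment below is only a rough, self-contained sketch of that delegation idea with simplified, assumed signatures; it is not the class added by this pull request, and Ambari's real StackDefinition interface has many more methods.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

/** Simplified stand-in for Ambari's stack abstraction (method names follow the diff below). */
interface StackDefinition {
  Set<String> getServices();
  String getServiceForConfigType(String configType); // null if the config type is unknown
}

/** Sketch of the CompositeStack idea: answer queries by delegating to each member stack. */
class CompositeStack implements StackDefinition {
  private final List<StackDefinition> stacks;

  CompositeStack(StackDefinition... stacks) {
    this.stacks = Arrays.asList(stacks);
  }

  @Override
  public Set<String> getServices() {
    // each service is expected to be defined by exactly one member stack (see the last two commits)
    Set<String> services = new LinkedHashSet<>();
    for (StackDefinition stack : stacks) {
      services.addAll(stack.getServices());
    }
    return services;
  }

  @Override
  public String getServiceForConfigType(String configType) {
    // the first member stack that knows the config type wins; unknown types are tolerated
    for (StackDefinition stack : stacks) {
      String service = stack.getServiceForConfigType(configType);
      if (service != null) {
        return service;
      }
    }
    return null;
  }
}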
Original file line number Diff line number Diff line change
@@ -45,7 +45,6 @@
import org.apache.ambari.server.controller.internal.ExportBlueprintRequest;
import org.apache.ambari.server.controller.internal.RequestImpl;
import org.apache.ambari.server.controller.internal.ResourceImpl;
import org.apache.ambari.server.controller.internal.Stack;
import org.apache.ambari.server.controller.spi.ClusterController;
import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
import org.apache.ambari.server.controller.spi.NoSuchResourceException;
@@ -56,6 +55,7 @@
import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
import org.apache.ambari.server.controller.utilities.PredicateBuilder;
import org.apache.ambari.server.state.SecurityType;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.topology.AmbariContext;
import org.apache.ambari.server.topology.ClusterTopology;
import org.apache.ambari.server.topology.ClusterTopologyImpl;
@@ -68,6 +68,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.Iterables;

/**
* Renderer which renders a cluster resource as a blueprint.
*/
@@ -195,9 +197,12 @@ private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
configProcessor.doUpdateForBlueprintExport();

Stack stack = topology.getBlueprint().getStack();
blueprintResource.setProperty("Blueprints/stack_name", stack.getName());
blueprintResource.setProperty("Blueprints/stack_version", stack.getVersion());
Set<StackId> stackIds = topology.getBlueprint().getStackIds();
if (stackIds.size() == 1) {
StackId stackId = Iterables.getOnlyElement(stackIds);
blueprintResource.setProperty("Blueprints/stack_name", stackId.getStackName());
blueprintResource.setProperty("Blueprints/stack_version", stackId.getStackVersion());
}

if (topology.isClusterKerberosEnabled()) {
Map<String, Object> securityConfigMap = new LinkedHashMap<>();
Original file line number Diff line number Diff line change
@@ -18,7 +18,6 @@

package org.apache.ambari.server.api.services.stackadvisor;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -29,7 +28,7 @@
import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse.BlueprintConfigurations;
import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
import org.apache.ambari.server.controller.internal.Stack;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.topology.AdvisedConfiguration;
import org.apache.ambari.server.topology.Blueprint;
@@ -77,26 +76,27 @@ public static void init(StackAdvisorHelper instance) {
* @param userProvidedConfigurations User configurations of cluster provided in Blueprint + Cluster template
*/
public void adviseConfiguration(ClusterTopology clusterTopology, Map<String, Map<String, String>> userProvidedConfigurations) throws ConfigurationTopologyException {
StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, StackAdvisorRequestType.CONFIGURATIONS);
try {
RecommendationResponse response = stackAdvisorHelper.recommend(request);
addAdvisedConfigurationsToTopology(response, clusterTopology, userProvidedConfigurations);
} catch (StackAdvisorException e) {
throw new ConfigurationTopologyException(RECOMMENDATION_FAILED, e);
} catch (IllegalArgumentException e) {
throw new ConfigurationTopologyException(INVALID_RESPONSE, e);
for (StackId stackId : clusterTopology.getBlueprint().getStackIds()) {
StackAdvisorRequest request = createStackAdvisorRequest(clusterTopology, stackId, StackAdvisorRequestType.CONFIGURATIONS);
try {
RecommendationResponse response = stackAdvisorHelper.recommend(request);
addAdvisedConfigurationsToTopology(response, clusterTopology, userProvidedConfigurations);
} catch (StackAdvisorException e) {
throw new ConfigurationTopologyException(RECOMMENDATION_FAILED, e);
} catch (IllegalArgumentException e) {
throw new ConfigurationTopologyException(INVALID_RESPONSE, e);
}
}
}

private StackAdvisorRequest createStackAdvisorRequest(ClusterTopology clusterTopology, StackAdvisorRequestType requestType) {
Stack stack = clusterTopology.getBlueprint().getStack(); // TODO: implement multi-stack
private StackAdvisorRequest createStackAdvisorRequest(ClusterTopology clusterTopology, StackId stackId, StackAdvisorRequestType requestType) {
Map<String, Set<String>> hgComponentsMap = gatherHostGroupComponents(clusterTopology);
Map<String, Set<String>> hgHostsMap = gatherHostGroupBindings(clusterTopology);
Map<String, Set<String>> componentHostsMap = gatherComponentsHostsMap(hgComponentsMap,
hgHostsMap);
return StackAdvisorRequest.StackAdvisorRequestBuilder
.forStack(stack.getName(), stack.getVersion())
.forServices(new ArrayList<>(clusterTopology.getBlueprint().getServices()))
.forStack(stackId)
.forServices(clusterTopology.getBlueprint().getStack().getServices(stackId))
.forHosts(gatherHosts(clusterTopology))
.forHostsGroupBindings(gatherHostGroupBindings(clusterTopology))
.forHostComponents(gatherHostGroupComponents(clusterTopology))
Original file line number Diff line number Diff line change
@@ -33,7 +33,6 @@
import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
import org.apache.ambari.server.api.services.stackadvisor.validations.ValidationResponse;
import org.apache.ambari.server.configuration.Configuration;

import org.apache.ambari.server.state.ServiceInfo;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -122,9 +121,7 @@ public synchronized RecommendationResponse recommend(StackAdvisorRequest request
throws StackAdvisorException {
requestId = generateRequestId();

// TODO, need to pass the service Name that was modified.
// For now, hardcode
String serviceName = "ZOOKEEPER";
String serviceName = request.getServices().stream().findAny().orElse(null);

ServiceInfo.ServiceAdvisorType serviceAdvisorType = getServiceAdvisorType(request.getStackName(), request.getStackVersion(), serviceName);
StackAdvisorCommand<RecommendationResponse> command = createRecommendationCommand(serviceName, request);
Original file line number Diff line number Diff line change
@@ -29,6 +29,7 @@

import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
import org.apache.ambari.server.state.ChangedConfigInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.commons.lang.StringUtils;

import com.google.common.base.Preconditions;
@@ -146,6 +147,10 @@ public static StackAdvisorRequestBuilder forStack(String stackName, String stack
return new StackAdvisorRequestBuilder(stackName, stackVersion);
}

public static StackAdvisorRequestBuilder forStack(StackId stackId) {
return new StackAdvisorRequestBuilder(stackId.getStackName(), stackId.getStackVersion());
}

public StackAdvisorRequestBuilder ofType(StackAdvisorRequestType requestType) {
this.instance.requestType = requestType;
return this;
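
The new forStack(StackId) overload above simply unpacks the typed id and delegates to the existing (name, version) entry point, so call sites cannot swap the two strings. A tiny self-contained sketch of the same pattern, using hypothetical stand-in classes rather than Ambari's real ones:

/** Hypothetical stand-ins illustrating the typed-overload pattern added above. */
final class StackIdOverloadDemo {
  static final class StackId {
    private final String name;
    private final String version;

    StackId(String name, String version) {
      this.name = name;
      this.version = version;
    }

    String getStackName()    { return name; }
    String getStackVersion() { return version; }
  }

  static final class RequestBuilder {
    static RequestBuilder forStack(String stackName, String stackVersion) {
      System.out.println("request for stack " + stackName + "-" + stackVersion);
      return new RequestBuilder();
    }

    // typed overload, mirroring the one added to StackAdvisorRequestBuilder
    static RequestBuilder forStack(StackId stackId) {
      return forStack(stackId.getStackName(), stackId.getStackVersion());
    }
  }

  public static void main(String[] args) {
    RequestBuilder.forStack(new StackId("HDP", "3.0")); // prints: request for stack HDP-3.0
  }
}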
Original file line number Diff line number Diff line change
@@ -52,7 +52,7 @@ public class StackAdvisorRunner {
*/
public void runScript(ServiceInfo.ServiceAdvisorType serviceAdvisorType, StackAdvisorCommandType saCommandType, File actionDirectory)
throws StackAdvisorException {
LOG.info(String.format("StackAdvisorRunner. serviceAdvisorType=%s, actionDirectory=%s, command=%s", serviceAdvisorType.toString(), actionDirectory,
LOG.info(String.format("StackAdvisorRunner. serviceAdvisorType=%s, actionDirectory=%s, command=%s", serviceAdvisorType, actionDirectory,
saCommandType));

String outputFile = actionDirectory + File.separator + "stackadvisor.out";
Original file line number Diff line number Diff line change
@@ -63,7 +63,6 @@
import org.apache.ambari.server.controller.internal.AbstractControllerResourceProvider;
import org.apache.ambari.server.controller.internal.AmbariPrivilegeResourceProvider;
import org.apache.ambari.server.controller.internal.BaseClusterRequest;
import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
import org.apache.ambari.server.controller.internal.ClusterPrivilegeResourceProvider;
import org.apache.ambari.server.controller.internal.ClusterResourceProvider;
import org.apache.ambari.server.controller.internal.HostResourceProvider;
@@ -940,8 +939,6 @@ public void performStaticInjection() {
SecurityFilter.init(injector.getInstance(Configuration.class));
StackDefinedPropertyProvider.init(injector);
AbstractControllerResourceProvider.init(injector.getInstance(ResourceProviderFactory.class));
BlueprintResourceProvider.init(injector.getInstance(BlueprintFactory.class),
injector.getInstance(BlueprintDAO.class), injector.getInstance(SecurityConfigurationFactory.class), ambariMetaInfo);
StackDependencyResourceProvider.init(ambariMetaInfo);
ClusterResourceProvider.init(injector.getInstance(TopologyManager.class),
injector.getInstance(TopologyRequestFactoryImpl.class), injector.getInstance(SecurityConfigurationFactory
Original file line number Diff line number Diff line change
@@ -63,6 +63,7 @@
import org.apache.ambari.server.configuration.Configuration.ConnectionPoolType;
import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.internal.AlertTargetResourceProvider;
import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
import org.apache.ambari.server.controller.internal.ClusterSettingResourceProvider;
import org.apache.ambari.server.controller.internal.ClusterStackVersionResourceProvider;
import org.apache.ambari.server.controller.internal.ComponentResourceProvider;
@@ -168,6 +169,8 @@
import org.apache.ambari.server.state.stack.OsFamily;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
import org.apache.ambari.server.topology.BlueprintFactory;
import org.apache.ambari.server.topology.BlueprintValidator;
import org.apache.ambari.server.topology.BlueprintValidatorImpl;
import org.apache.ambari.server.topology.PersistedState;
import org.apache.ambari.server.topology.PersistedStateImpl;
import org.apache.ambari.server.topology.SecurityConfigurationFactory;
@@ -506,6 +509,7 @@ private void installFactories() {
.implement(ResourceProvider.class, Names.named("alertTarget"), AlertTargetResourceProvider.class)
.implement(ResourceProvider.class, Names.named("viewInstance"), ViewInstanceResourceProvider.class)
.implement(ResourceProvider.class, Names.named("rootServiceHostComponentConfiguration"), RootServiceComponentConfigurationResourceProvider.class)
.implement(ResourceProvider.class, Names.named(BlueprintResourceProvider.NAME), BlueprintResourceProvider.class)
.build(ResourceProviderFactory.class));

install(new FactoryModuleBuilder().implement(
@@ -537,6 +541,7 @@ private void installFactories() {
bind(RegistryFactory.class).to(RegistryFactoryImpl.class);
bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
bind(SecurityHelper.class).toInstance(SecurityHelperImpl.getInstance());
bind(BlueprintValidator.class).to(BlueprintValidatorImpl.class);
bind(BlueprintFactory.class);

install(new FactoryModuleBuilder().implement(AmbariEvent.class, Names.named("userCreated"), UserCreatedEvent.class).build(AmbariEventFactory.class));
Original file line number Diff line number Diff line change
@@ -22,6 +22,7 @@
import javax.inject.Named;

import org.apache.ambari.server.controller.internal.AlertTargetResourceProvider;
import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
import org.apache.ambari.server.controller.internal.ClusterStackVersionResourceProvider;
import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
import org.apache.ambari.server.controller.internal.ViewInstanceResourceProvider;
@@ -89,4 +90,9 @@ public interface ResourceProviderFactory {
@Named("viewInstance")
ViewInstanceResourceProvider getViewInstanceResourceProvider();

@Named(BlueprintResourceProvider.NAME)
BlueprintResourceProvider getBlueprintResourceProvider(
AmbariManagementController managementController
);

}
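
The factory method above replaces the static BlueprintResourceProvider.init(...) call removed earlier: Guice now generates the factory implementation from the FactoryModuleBuilder binding shown in the previous file's hunk. Below is a minimal, self-contained sketch of Guice assisted injection in general, with toy class names that are not Ambari's; the runtime argument is passed through the factory method while any remaining dependencies would be injected by Guice.

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.FactoryModuleBuilder;

// Toy factory interface: the single method takes the runtime argument,
// analogous to getBlueprintResourceProvider(AmbariManagementController).
interface GreeterFactory {
  Greeter create(String name);
}

class Greeter {
  private final String name;

  @Inject
  Greeter(@Assisted String name) {   // @Assisted marks the factory-supplied argument
    this.name = name;
  }

  String greet() {
    return "hello, " + name;
  }
}

class DemoModule extends AbstractModule {
  @Override
  protected void configure() {
    // Guice generates the GreeterFactory implementation; no hand-written factory class is needed
    install(new FactoryModuleBuilder().build(GreeterFactory.class));
  }
}

class Demo {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new DemoModule());
    GreeterFactory factory = injector.getInstance(GreeterFactory.class);
    System.out.println(factory.create("blueprints").greet()); // hello, blueprints
  }
}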
Original file line number Diff line number Diff line change
@@ -226,7 +226,7 @@ public static ResourceProvider getResourceProvider(Resource.Type type,
case HostComponentProcess:
return new HostComponentProcessResourceProvider(managementController);
case Blueprint:
return new BlueprintResourceProvider(managementController);
return resourceProviderFactory.getBlueprintResourceProvider(managementController);
case KerberosDescriptor:
return resourceProviderFactory.getKerberosDescriptorResourceProvider(managementController);
case Recommendation:
Original file line number Diff line number Diff line change
@@ -58,13 +58,15 @@

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

/**
* Updates configuration properties based on cluster topology. This is done when exporting
* a blueprint and when a cluster is provisioned via a blueprint.
*/
// TODO move to topology package
public class BlueprintConfigurationProcessor {

private static final Logger LOG = LoggerFactory.getLogger(BlueprintConfigurationProcessor.class);
@@ -437,7 +439,7 @@ public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyExcept

private void trimProperties(Configuration clusterConfig, ClusterTopology clusterTopology) {
Blueprint blueprint = clusterTopology.getBlueprint();
Stack stack = blueprint.getStack();
StackDefinition stack = blueprint.getStack();

Map<String, Map<String, String>> configTypes = clusterConfig.getFullProperties();
for (String configType : configTypes.keySet()) {
@@ -448,7 +450,7 @@ private void trimProperties(Configuration clusterConfig, ClusterTopology cluster
}
}

private void trimPropertyValue(Configuration clusterConfig, Stack stack, String configType, Map<String, String> properties, String propertyName) {
private void trimPropertyValue(Configuration clusterConfig, StackDefinition stack, String configType, Map<String, String> properties, String propertyName) {
if (propertyName != null && properties.get(propertyName) != null) {

TrimmingStrategy trimmingStrategy =
@@ -2880,7 +2882,7 @@ private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<
* @param configTypesUpdated
* @param stack
*/
private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated, Stack stack) {
private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated, StackDefinition stack) {
Collection<String> blueprintServices = clusterTopology.getBlueprint().getServices();

LOG.debug("Handling excluded properties for blueprint services: {}", blueprintServices);
@@ -2971,38 +2973,41 @@ private static void setRetryConfiguration(Configuration configuration, Set<Strin
* @param configTypesUpdated
* the list of configuration types updated (cluster-env will be added
* to this).
* @throws ConfigurationTopologyException
*/
private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
throws ConfigurationTopologyException {
ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
Stack stack = clusterTopology.getBlueprint().getStack();
String stackName = stack.getName();
String stackVersion = stack.getVersion();

StackId stackId = new StackId(stackName, stackVersion);

Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_PACKAGES_PROPERTY);
Set<String> properties = ImmutableSet.of(
ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY,
ConfigHelper.CLUSTER_ENV_STACK_PACKAGES_PROPERTY
);

try {
Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);

for( String property : properties ){
if (clusterEnvDefaultProperties.containsKey(property)) {
configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
clusterEnvDefaultProperties.get(property));
for (StackId stackId : clusterTopology.getBlueprint().getStackIds()) {
Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
if (defaultStackProperties.containsKey(CLUSTER_ENV_CONFIG_TYPE_NAME)) {
Map<String, String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);

for (String property : properties) {
if (clusterEnvDefaultProperties.containsKey(property)) {
configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
clusterEnvDefaultProperties.get(property)
);

// make sure to include the configuration type as being updated
configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
}
}

// make sure to include the configuration type as being updated
configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
break;
}
}
} catch( AmbariException ambariException ){
throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
ambariException);
} catch (AmbariException e) {
throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features", e);
}
}
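
The hunk above changes setStackToolsAndFeatures to walk the blueprint's stacks and copy a fixed set of cluster-env defaults from the first stack that defines them, since not every stack has cluster-env. A self-contained toy version of just that selection logic, using plain maps instead of Ambari's ConfigHelper:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Toy illustration of the selection logic above: take the wanted cluster-env
 *  properties from the first stack that defines a cluster-env config type at all. */
class ClusterEnvDefaults {
  static Map<String, String> pick(List<Map<String, Map<String, String>>> perStackDefaults,
                                  Set<String> wantedProperties) {
    Map<String, String> result = new LinkedHashMap<>();
    for (Map<String, Map<String, String>> stackDefaults : perStackDefaults) {
      Map<String, String> clusterEnv = stackDefaults.get("cluster-env");
      if (clusterEnv == null) {
        continue;                       // not all stacks have cluster-env
      }
      for (String property : wantedProperties) {
        if (clusterEnv.containsKey(property)) {
          result.put(property, clusterEnv.get(property));
        }
      }
      break;                            // first stack with cluster-env wins
    }
    return result;
  }
}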

@@ -3101,7 +3106,7 @@ private static class StackPropertyTypeFilter implements PropertyFilter {
*/
@Override
public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
Stack stack = topology.getBlueprint().getStack();
StackDefinition stack = topology.getBlueprint().getStack();
final String serviceName = stack.getServiceForConfigType(configType);
return !(stack.isPasswordProperty(serviceName, configType, propertyName) ||
stack.isKerberosPrincipalNameProperty(serviceName, configType, propertyName));
@@ -3198,7 +3203,7 @@ private static abstract class DependencyFilter implements PropertyFilter {
*/
@Override
public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
Stack stack = topology.getBlueprint().getStack();
StackDefinition stack = topology.getBlueprint().getStack();
Configuration configuration = topology.getConfiguration();

final String serviceName = stack.getServiceForConfigType(configType);