Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,15 @@
import org.apache.ambari.server.controller.spi.Resource;
import org.apache.ambari.server.controller.spi.ResourceProvider;
import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.topology.Blueprint;
import org.apache.ambari.server.topology.BlueprintFactory;
import org.apache.ambari.server.topology.Configuration;
import org.apache.ambari.server.topology.HostGroupInfo;
import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
import org.apache.ambari.server.topology.SecurityConfiguration;
import org.apache.ambari.server.topology.TopologyRequest;
import org.apache.ambari.server.topology.TopologyRequestUtil;

/**
* Provides common cluster request functionality.
Expand All @@ -53,6 +55,11 @@ public abstract class BaseClusterRequest implements TopologyRequest {

protected ProvisionAction provisionAction;

/**
* The raw request body. We would like to persist it.
*/
protected String rawRequestBody;

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Persisting the cluster creation template is a great idea, but we probably need to filter some information out of the template first (the "default_password" field, any configuration items in the cluster config that are of type "PASSWORD") prior to persisting the document.

There may be other types of information that should be filtered out as well, but from a security perspective, we should definitely filter out any password information prior to persisting the data.


/**
* cluster id
*/
Expand Down Expand Up @@ -118,6 +125,19 @@ public Map<String, HostGroupInfo> getHostGroupInfo() {
return hostGroupInfoMap;
}

/**
* @return the raw request body in JSON string
*/
public String getRawRequestBody() {
return rawRequestBody;
}

@Override
public Set<StackId> getStackIds() {
return TopologyRequestUtil.getStackIdsFromRequest(
TopologyRequestUtil.getPropertyMap(rawRequestBody));
}

/**
* Validate that all properties specified in the predicate are valid for the Host resource.
*
Expand Down Expand Up @@ -180,6 +200,7 @@ public SecurityConfiguration getSecurityConfiguration() {
return securityConfiguration;
}


/**
* Get the host resource provider instance.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -533,7 +533,8 @@ private RequestStatusResponse processBlueprintCreate(Map<String, Object> propert

ProvisionClusterRequest createClusterRequest;
try {
createClusterRequest = topologyRequestFactory.createProvisionClusterRequest(properties, securityConfiguration);
createClusterRequest =
topologyRequestFactory.createProvisionClusterRequest(rawRequestBody, properties, securityConfiguration);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe at this point only the "filtered" document should be passed around, meaning that the passwords have already been removed from the document.

} catch (InvalidTopologyTemplateException e) {
throw new IllegalArgumentException("Invalid Cluster Creation Template: " + e, e);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1086,7 +1086,9 @@ public static String getHostNameFromProperties(Map<String, Object> properties) {
private RequestStatusResponse submitHostRequests(Request request) throws SystemException {
ScaleClusterRequest requestRequest;
try {
requestRequest = new ScaleClusterRequest(request.getProperties());
requestRequest = new ScaleClusterRequest(
request.getRequestInfoProperties().get(Request.REQUEST_INFO_BODY_PROPERTY),
request.getProperties());
} catch (InvalidTopologyTemplateException e) {
throw new IllegalArgumentException("Invalid Add Hosts Template: " + e, e);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,8 +174,10 @@ public class ProvisionClusterRequest extends BaseClusterRequest implements Provi
* @param properties request properties
* @param securityConfiguration security config related properties
*/
public ProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws
public ProvisionClusterRequest(String rawRequestBody, Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws
InvalidTopologyTemplateException {
this.rawRequestBody = rawRequestBody;

setClusterName(String.valueOf(properties.get(
ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID)));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,8 @@ public class ScaleClusterRequest extends BaseClusterRequest {
*
* @throws InvalidTopologyTemplateException if any validation of properties fails
*/
public ScaleClusterRequest(Set<Map<String, Object>> propertySet) throws InvalidTopologyTemplateException {
public ScaleClusterRequest(String rawRequestBody, Set<Map<String, Object>> propertySet) throws InvalidTopologyTemplateException {
this.rawRequestBody = rawRequestBody;
for (Map<String, Object> properties : propertySet) {
// can only operate on a single cluster per logical request
if (getClusterName() == null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,10 @@ public class TopologyRequestEntity {
@Column(name = "description", length = 1024, nullable = false)
private String description;

@Lob
@Column(name = "raw_request_body", length = 100000, nullable = false)
private String rawRequestBody;

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It might be worth considering persisting the document in a slightly different way. The cluster "artifacts" directory (clusters/CLUSTER_NAME/artifacts) REST resource already supports persisting arbitrary documents.

Persisting the document in the "artifacts" resource would also make it available via the REST API, which would greatly assist in debugging failed Blueprint deployments, particularly when users generate the document and do not store it.

It may still be best to persist the information here (minus the passwords of course), but I think its at least a good idea to consider the "artifacts"-related approach.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks to me that the only usage of the Artifacts table used to be to store the Kerberos descriptor, but now that Kerberos has its own tables, it's kind of abandoned.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just checked with @rlevas, the Ambari Kerberos expert, and he mentioned that the Kerberos descriptor is still persisted to the artifacts table.

The Kerberos tables you mentioned are somehow related to the accounting process for Kerberos identities, and do not replace the usage of the artifacts resource.

We should probably persist the cluster creation template in "artifacts", since this is in keeping with how things have been handled for the Kerberos descriptor, which is also a user-supplied artifact that is useful to retrieve via the REST APIs.

I still think that using the "artifacts" resource is a better approach, since that table is already defined for this purpose, and also since using "artifacts" would provide REST access to this resource after a deployment, without adding any new code.


@OneToMany(mappedBy = "topologyRequestEntity", cascade = CascadeType.ALL)
private Collection<TopologyHostGroupEntity> topologyHostGroupEntities;

Expand Down Expand Up @@ -141,6 +145,20 @@ public void setDescription(String description) {
this.description = description;
}

/**
* @return the raw request body in JSON
*/
public String getRawRequestBody() {
return rawRequestBody;
}

/**
* @param rawRequestBody the raw request body in JSON
*/
public void setRawRequestBody(String rawRequestBody) {
this.rawRequestBody = rawRequestBody;
}

public Collection<TopologyHostGroupEntity> getTopologyHostGroupEntities() {
return topologyHostGroupEntities;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Stream;

import javax.annotation.Nonnull;

Expand All @@ -37,7 +36,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

Expand All @@ -52,7 +50,7 @@ public class BlueprintBasedClusterProvisionRequest implements Blueprint, Provisi
private final ProvisionClusterRequest request;
private final Set<StackId> stackIds;
private final StackDefinition stack;
private final Map<String, MpackInstance> mpacks;
private final Set<MpackInstance> mpacks;
private final SecurityConfiguration securityConfiguration;

public BlueprintBasedClusterProvisionRequest(AmbariContext ambariContext, SecurityConfigurationFactory securityConfigurationFactory, Blueprint blueprint, ProvisionClusterRequest request) {
Expand All @@ -61,9 +59,9 @@ public BlueprintBasedClusterProvisionRequest(AmbariContext ambariContext, Securi

stackIds = ImmutableSet.copyOf(Sets.union(blueprint.getStackIds(), request.getStackIds()));
stack = ambariContext.composeStacks(stackIds);
mpacks = ImmutableMap.copyOf(
Stream.concat(blueprint.getMpacks().stream(), request.getMpacks().stream())
.collect(toMap(MpackInstance::getMpackName, Function.identity())));
mpacks = ImmutableSet.<MpackInstance>builder().
addAll(blueprint.getMpacks()).
addAll(request.getMpacks()).build();

securityConfiguration = processSecurityConfiguration(securityConfigurationFactory);

Expand Down Expand Up @@ -104,7 +102,7 @@ public Set<StackId> getStackIds() {

@Override
public Collection<MpackInstance> getMpacks() {
return mpacks.values();
return mpacks;
}

@Override
Expand Down Expand Up @@ -166,7 +164,7 @@ public StackDefinition getStack() {

public Map<String, Map<String, ServiceInstance>> getServicesByMpack() {
Map<String, Map<String, ServiceInstance>> result = new HashMap<>();
for (MpackInstance mpack : mpacks.values()) {
for (MpackInstance mpack : mpacks) {
Map<String, ServiceInstance> services = mpack.getServiceInstances().stream()
.collect(toMap(ServiceInstance::getName, Function.identity()));
result.put(mpack.getMpackName(), services);
Expand All @@ -179,7 +177,7 @@ public Map<String, Map<String, ServiceInstance>> getServicesByMpack() {
* whose name is unique across all mpacks.
*/
public Map<String, ServiceInstance> getUniqueServices() {
Map<String, ServiceInstance> map = mpacks.values().stream()
Map<String, ServiceInstance> map = mpacks.stream()
.flatMap(mpack -> mpack.getServiceInstances().stream())
.collect(toMap(ServiceInstance::getName, Function.identity(), (s1, s2) -> null));
map.entrySet().removeIf(e -> e.getValue() == null); // remove non-unique names mapped to null
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

/**
* Represents a cluster topology.
Expand All @@ -68,18 +69,21 @@ public class ClusterTopologyImpl implements ClusterTopology {
private final BlueprintBasedClusterProvisionRequest provisionRequest;
private final String defaultPassword;
private final Map<String, Set<ResolvedComponent>> resolvedComponents;
private final Setting setting;

public ClusterTopologyImpl(AmbariContext ambariContext, TopologyRequest topologyRequest) throws InvalidTopologyException {
this.ambariContext = ambariContext;
this.clusterId = topologyRequest.getClusterId();
this.blueprint = topologyRequest.getBlueprint();
this.setting = blueprint.getSetting();
this.configuration = topologyRequest.getConfiguration();
configRecommendationStrategy = ConfigRecommendationStrategy.NEVER_APPLY;
provisionAction = topologyRequest instanceof BaseClusterRequest ? ((BaseClusterRequest) topologyRequest).getProvisionAction() : INSTALL_AND_START; // FIXME

provisionRequest = null;
defaultPassword = null;
stackIds = topologyRequest.getBlueprint().getStackIds();
stackIds = ImmutableSet.copyOf(
Sets.union(topologyRequest.getStackIds(), topologyRequest.getBlueprint().getStackIds()));
stack = ambariContext.composeStacks(stackIds);
resolvedComponents = ImmutableMap.of();

Expand All @@ -104,7 +108,7 @@ public ClusterTopologyImpl(
defaultPassword = provisionRequest.getDefaultPassword();
stackIds = request.getStackIds();
stack = request.getStack();

setting = request.getSetting();
blueprint.getConfiguration().setParentConfiguration(stack.getConfiguration(getServices()));
registerHostGroupInfo(request.getHostGroupInfo());
}
Expand Down Expand Up @@ -150,7 +154,7 @@ public Configuration getConfiguration() {

@Override
public Setting getSetting() {
return provisionRequest.getSetting();
return setting;
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.inject.Inject;
import javax.inject.Singleton;

import org.apache.ambari.server.AmbariException;
Expand All @@ -50,12 +52,12 @@
import org.apache.ambari.server.orm.entities.TopologyRequestEntity;
import org.apache.ambari.server.stack.NoSuchStackException;
import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.topology.tasks.TopologyTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.gson.Gson;
import com.google.inject.Inject;
import com.google.inject.persist.Transactional;

/**
Expand Down Expand Up @@ -91,9 +93,6 @@ public class PersistedStateImpl implements PersistedState {
@Inject
private HostRoleCommandDAO hostRoleCommandDAO;

@Inject
private HostRoleCommandDAO physicalTaskDAO;

@Inject
private BlueprintFactory blueprintFactory;

Expand Down Expand Up @@ -255,6 +254,8 @@ public Map<ClusterTopology, List<LogicalRequest>> getAllRequests() {
private TopologyRequestEntity toEntity(BaseClusterRequest request) {
TopologyRequestEntity entity = new TopologyRequestEntity();

entity.setRawRequestBody(request.getRawRequestBody());

//todo: this isn't set for a scaling operation because we had intended to allow multiple
//todo: bp's to be used to scale a cluster although this isn't currently supported by
//todo: new topology infrastructure
Expand Down Expand Up @@ -330,7 +331,7 @@ private TopologyHostRequestEntity toEntity(HostRequest request, TopologyLogicalR
logicalTaskEntity.setTopologyHostTaskEntity(topologyTaskEntity);
Long physicalId = request.getPhysicalTaskId(logicalTaskId);
if (physicalId != null) {
logicalTaskEntity.setHostRoleCommandEntity(physicalTaskDAO.findByPK(physicalId));
logicalTaskEntity.setHostRoleCommandEntity(hostRoleCommandDAO.findByPK(physicalId));
}
logicalTaskEntity.setTopologyHostTaskEntity(topologyTaskEntity);
}
Expand Down Expand Up @@ -391,13 +392,16 @@ private static class ReplayedTopologyRequest implements TopologyRequest {
private final Configuration configuration;
private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
private final ProvisionAction provisionAction;
private final Set<StackId> stackIds;

public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory blueprintFactory) {
clusterId = entity.getClusterId();
type = Type.valueOf(entity.getAction());
description = entity.getDescription();
provisionAction = entity.getProvisionAction();

stackIds = TopologyRequestUtil.getStackIdsFromRequest(entity.getRawRequestBody());

try {
blueprint = blueprintFactory.getBlueprint(entity.getBlueprintName());
} catch (NoSuchStackException e) {
Expand All @@ -409,6 +413,11 @@ public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory bl
parseHostGroupInfo(entity);
}

@Override
public Set<StackId> getStackIds() {
return stackIds;
}

@Override
public Long getClusterId() {
return clusterId;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,12 @@

package org.apache.ambari.server.topology;

import static java.util.Collections.emptySet;

import java.util.Map;
import java.util.Set;

import org.apache.ambari.server.state.StackId;

/**
* A request which is used to create or modify a cluster topology.
Expand Down Expand Up @@ -70,4 +75,11 @@ enum Type { PROVISION, SCALE, EXPORT }
* @return string description of the request
*/
String getDescription();

/**
* @return a set of stack id's if supported by the TopologyRequest.
*/
default Set<StackId> getStackIds() {
return emptySet();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,6 @@
*/
public interface TopologyRequestFactory {

ProvisionClusterRequest createProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException;
ProvisionClusterRequest createProvisionClusterRequest(String rawRequestBody, Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException;
// todo: use to create other request types
}
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,7 @@
public class TopologyRequestFactoryImpl implements TopologyRequestFactory {

@Override
public ProvisionClusterRequest createProvisionClusterRequest(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException {
return new ProvisionClusterRequest(properties, securityConfiguration);

public ProvisionClusterRequest createProvisionClusterRequest(String rawRequestBody, Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws InvalidTopologyTemplateException {
return new ProvisionClusterRequest(rawRequestBody, properties, securityConfiguration);
}
}
Loading