Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ public static void setup() throws AmbariException {

OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster1);
ServiceGroup serviceGroup = cluster1.addServiceGroup("CORE", "HDP-2.6.0");
ServiceGroup serviceGroup = cluster1.addServiceGroup("CORE", cluster1.getDesiredStackVersion().getStackId());
cluster1.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion);

SERVICE_SITE_CLUSTER = new HashMap<>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1311,7 +1311,7 @@ public void testComponentInProgressStatusSafeAfterStatusReport() throws Exceptio
*/
/**
 * Adds an HDFS-style service to the given cluster for test setup.
 * Creates (or reuses) a repository version for the cluster, registers the
 * service under the "CORE" service group, and returns the new service.
 *
 * @param cluster the cluster to add the service to
 * @param serviceName the name (and display name) of the service to add
 * @return the newly added {@link Service}
 * @throws AmbariException if the service cannot be added
 */
private Service addService(Cluster cluster, String serviceName) throws AmbariException {
  RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
  // Derive the stack id from the cluster's desired stack instead of a
  // hard-coded "HDP-2.6.0" literal, so the test tracks the configured stack.
  ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
  return cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -1604,7 +1604,7 @@ private File createTestKeytabData(HeartBeatHandler heartbeatHandler) throws Exce
*/
/**
 * Adds a service to the given cluster for test setup.
 * Creates (or reuses) a repository version for the cluster, registers the
 * service under the "CORE" service group using the test's dummy stack id,
 * and returns the new service.
 *
 * @param cluster the cluster to add the service to
 * @param serviceName the name (and display name) of the service to add
 * @return the newly added {@link Service}
 * @throws AmbariException if the service cannot be added
 */
private Service addService(Cluster cluster, String serviceName) throws AmbariException {
  RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
  // Use the shared DummyStackId test constant rather than a hard-coded
  // "HDP-2.6.0" string, keeping the stack id consistent across the test class.
  ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", DummyStackId);
  return cluster.addService(serviceGroup, serviceName, serviceName, repositoryVersion);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ public void testServiceComponentInstalled()
Cluster cluster = heartbeatTestHelper.getDummyCluster();

RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down Expand Up @@ -183,7 +183,7 @@ public void testServiceComponentUninstalled()
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down Expand Up @@ -220,7 +220,7 @@ public void testClusterEnvConfigChanged()
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down Expand Up @@ -261,7 +261,7 @@ public void testMaintenanceModeChanged()
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down Expand Up @@ -297,7 +297,7 @@ public void testServiceComponentRecoveryChanged()
throws Exception {
Cluster cluster = heartbeatTestHelper.getDummyCluster();
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down Expand Up @@ -341,7 +341,7 @@ public void testMultiNodeCluster()
RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(cluster);

// Add HDFS service with DATANODE component to the cluster
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-2.6.0.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS, repositoryVersion);

hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -341,16 +341,17 @@ private Cluster createDefaultCluster(String clusterName) throws Exception {
final String host2 = "b" + getUniqueName();
final String host3 = "c" + getUniqueName();

setupClusterWithHosts(clusterName, "HDP-2.0.6", Arrays.asList(host1, host2, host3), "centos6");
String stackId = "HDP-2.0.6";
setupClusterWithHosts(clusterName, stackId, Arrays.asList(host1, host2, host3), "centos6");

Cluster cluster = clusters.getCluster(clusterName);
cluster.setDesiredStackVersion(new StackId("HDP-2.0.6"));
cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
cluster.setDesiredStackVersion(new StackId(stackId));
cluster.setCurrentStackVersion(new StackId(stackId));

RepositoryVersionEntity repositoryVersion = repositoryVersion206;

ServiceGroup serviceGroupCore = cluster.addServiceGroup(SERVICE_GROUP_NAME_CORE, "HDP-1.0");
ServiceGroup serviceGroupTest = cluster.addServiceGroup(SERVICE_GROUP_NAME_TEST, "HDP-1.0");
ServiceGroup serviceGroupCore = cluster.addServiceGroup(SERVICE_GROUP_NAME_CORE, stackId);
ServiceGroup serviceGroupTest = cluster.addServiceGroup(SERVICE_GROUP_NAME_TEST, stackId);

Service hdfs = cluster.addService(serviceGroupCore, SERVICE_NAME_HDFS, SERVICE_NAME_HDFS, repositoryVersion);
Service yarn = cluster.addService(serviceGroupCore, SERVICE_NAME_YARN, SERVICE_NAME_YARN, repositoryVersion);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -698,7 +698,7 @@ private void makeService(String serviceName, RepositoryVersionEntity serviceRepo
} catch (AmbariException e) {
clusters.addCluster(clusterName, parentEntity.getStackId());
cluster = clusters.getCluster(clusterName);
serviceGroup = cluster.addServiceGroup(serviceGroupName, "HDP-1.0");
serviceGroup = cluster.addServiceGroup(serviceGroupName, cluster.getDesiredStackVersion().getStackId());
}

cluster.addService(serviceGroup, serviceName, serviceName, serviceRepo);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ private RepositoryVersionEntity createClusterAndHosts(String INSTALLED_VERSION,
Map<String, List<Integer>> zkTopology = new HashMap<>();
List<Integer> zkServerHosts = Arrays.asList(0, 1, 2);
zkTopology.put("ZOOKEEPER_SERVER", new ArrayList<>(zkServerHosts));
ServiceGroup serviceGroup = c1.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = c1.addServiceGroup("CORE", this.stackId);
addService(c1, serviceGroup, hostList, zkTopology, "ZOOKEEPER", repositoryVersionEntity);

// install new version
Expand Down Expand Up @@ -477,7 +477,7 @@ public void testComponentHostVersionNotRequired() throws Exception {
.put("NAMENODE", Lists.newArrayList(0))
.put("DATANODE", Lists.newArrayList(1))
.build();
ServiceGroup serviceGroup = c1.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = c1.addServiceGroup("CORE", this.stackId);
addService(c1, serviceGroup, allHosts, topology, "HDFS", repo);

topology = new ImmutableMap.Builder<String, List<Integer>>()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ public class OrmTestHelper {
@Inject
private StackDAO stackDAO;

private static final StackId HDP_206 = new StackId("HDP", "2.0.6");
public static final StackId STACK_ID = new StackId("HDP", "2.2.0");
public static final String CLUSTER_NAME = "test_cluster1";
public static final String SERVICE_GROUP_NAME = "CORE";
Expand Down Expand Up @@ -372,7 +373,7 @@ public Long createCluster(String clusterName) throws Exception {
ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
StackDAO stackDAO = injector.getInstance(StackDAO.class);

StackEntity stackEntity = stackDAO.find("HDP", "2.0.6");
StackEntity stackEntity = stackDAO.find(HDP_206);
assertNotNull(stackEntity);

ClusterEntity clusterEntity = new ClusterEntity();
Expand Down Expand Up @@ -410,13 +411,12 @@ public Cluster buildNewCluster(Clusters clusters,
ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
ServiceComponentHostFactory schFactory, String hostName) throws Exception {
String clusterName = "cluster-" + System.currentTimeMillis();
StackId stackId = new StackId("HDP", "2.0.6");

createStack(stackId);
createStack(HDP_206);

clusters.addCluster(clusterName, stackId);
clusters.addCluster(clusterName, HDP_206);
Cluster cluster = clusters.getCluster(clusterName);
ServiceGroup serviceGroup = cluster.addServiceGroup(SERVICE_GROUP_NAME, stackId.getStackId());
ServiceGroup serviceGroup = cluster.addServiceGroup(SERVICE_GROUP_NAME, HDP_206.getStackId());
cluster = initializeClusterWithStack(cluster);

addHost(clusters, cluster, hostName);
Expand All @@ -427,9 +427,8 @@ public Cluster buildNewCluster(Clusters clusters,
}

/**
 * Initializes the given cluster with the helper's standard HDP 2.0.6 stack.
 * Sets the desired stack version and ensures a matching repository version
 * entity exists before tests exercise the cluster.
 *
 * @param cluster the cluster to initialize
 * @return the same cluster instance, for call chaining
 * @throws Exception if the repository version cannot be created
 */
public Cluster initializeClusterWithStack(Cluster cluster) throws Exception {
  // Use the shared HDP_206 constant instead of re-constructing
  // new StackId("HDP", "2.0.6") locally each time.
  cluster.setDesiredStackVersion(HDP_206);
  getOrCreateRepositoryVersion(HDP_206, HDP_206.getStackVersion());
  return cluster;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ public void setup() throws Exception {
EventBusSynchronizer.synchronizeAmbariEventPublisher(m_injector);

m_cluster = m_clusters.getClusterById(m_helper.createCluster());
serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0");
serviceGroup = m_cluster.addServiceGroup("CORE", m_cluster.getDesiredStackVersion().getStackId());
m_helper.initializeClusterWithStack(m_cluster);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ public void setup() throws Exception {
// install YARN so there is at least 1 service installed and no
// unexpected alerts since the test YARN service doesn't have any alerts
m_cluster = m_clusters.getClusterById(m_helper.createCluster());
serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0");
serviceGroup = m_cluster.addServiceGroup("CORE", m_cluster.getDesiredStackVersion().getStackId());
m_helper.initializeClusterWithStack(m_cluster);
m_helper.addHost(m_clusters, m_cluster, HOSTNAME);
m_helper.installYarnService(m_cluster, m_serviceFactory,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ ConfigGroup createConfigGroup() throws AmbariException {
ServiceGroupEntity serviceGroupEntity = new ServiceGroupEntity();
serviceGroupEntity.setClusterEntity(clusterEntity);
serviceGroupEntity.setServiceGroupName("default");
serviceGroupEntity.setStack(clusterEntity.getDesiredStack());
serviceGroupDAO.create(serviceGroupEntity);

ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ public void teardown() throws AmbariException, SQLException {

@Test
public void testCanBeRemoved() throws Exception{
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", STACK_ID.getStackId());
Service service = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion);

for (State state : State.values()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ private void installHdfsService() throws Exception {
cluster.getCurrentStackVersion(), REPO_VERSION);

String serviceName = "HDFS";
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
serviceFactory.createNew(cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, repositoryVersion);
Service service = cluster.getService(serviceName);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ public void testInitialAlertEvent() throws Exception {

private void installHdfsService() throws Exception {
String serviceName = "HDFS";
ServiceGroup serviceGroup = m_cluster.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = m_cluster.addServiceGroup("CORE", STACK_ID.getStackId());
m_serviceFactory.createNew(m_cluster, serviceGroup, Collections.emptyList(), serviceName, serviceName, m_repositoryVersion);
Service service = m_cluster.getService(serviceName);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ public void setup() throws Exception {
clusters.mapHostToCluster(hostName, "c1");
}

serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0");
serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId());
Service service = installService("HDFS", serviceGroup);
addServiceComponent(service, "NAMENODE");
addServiceComponent(service, "DATANODE");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,7 @@ public void testDeleteCluster() throws Exception {
// host config override
host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);

ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", stackId.getStackId());
Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion);

//Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ public void testOrphanedSCHDesiredEntityReAdd() throws Exception {
Assert.assertNotNull(clusterId);

Cluster cluster = clusters.getCluster(OrmTestHelper.CLUSTER_NAME);
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", "HDP-1.0");
ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion());
Assert.assertNotNull(cluster);

helper.addHost(clusters, cluster, "h1");
Expand Down