diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle
index 9f10c5dcfab73..6c4004074fa3b 100644
--- a/qa/rolling-upgrade/build.gradle
+++ b/qa/rolling-upgrade/build.gradle
@@ -30,6 +30,26 @@ task bwcTest {
}
for (Version version : bwcVersions.wireCompatible) {
+ /*
+ * The goal here is to:
+ *
+ * - start three nodes on the old version
+ * - run tests with systemProperty 'tests.rest.suite', 'old_cluster'
+ * - shut down one node
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'mixed_cluster'
+ * - shut down one node on the old version
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again
+ * - shut down the last node with the old version
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster'
+ * - shut down the entire cluster
+ *
+ *
+ * Be careful: gradle dry run spits out tasks in the wrong order but,
+ * strangely, running the tasks works properly.
+ */
String baseName = "v${version}"
Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) {
@@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) {
Object extension = extensions.findByName("${baseName}#oldClusterTestCluster")
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
bwcVersion = version
- numBwcNodes = 2
- numNodes = 2
+ numBwcNodes = 3
+ numNodes = 3
clusterName = 'rolling-upgrade'
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
if (version.onOrAfter('5.3.0')) {
@@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) {
systemProperty 'tests.rest.suite', 'old_cluster'
}
- Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask)
+ Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
+ configure(extensions.findByName("${baseName}#${name}")) {
+ dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
+ clusterName = 'rolling-upgrade'
+ unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
+ minimumMasterNodes = { 3 }
+ /* Override the data directory so the new node always gets the node we
+ * just stopped's data directory. */
+ dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
+ setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
+ }
+ }
- configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) {
- dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop"
- clusterName = 'rolling-upgrade'
- unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
- minimumMasterNodes = { 2 }
- /* Override the data directory so the new node always gets the node we
- * just stopped's data directory. */
- dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir }
- setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
+ Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)
+
+ configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
+ 0, { oldClusterTest.nodes.get(1).transportUri() })
+
+ Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
+ oneThirdUpgradedTestRunner.configure {
+ systemProperty 'tests.rest.suite', 'mixed_cluster'
+ systemProperty 'tests.first_round', 'true'
+ finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
}
- Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner")
- mixedClusterTestRunner.configure {
+ Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)
+
+ configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
+ 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })
+
+ Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
+ twoThirdsUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster'
- finalizedBy "${baseName}#oldClusterTestCluster#node0.stop"
+ systemProperty 'tests.first_round', 'false'
+ finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
}
Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)
- configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) {
- dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop"
- clusterName = 'rolling-upgrade'
- unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
- minimumMasterNodes = { 2 }
- /* Override the data directory so the new node always gets the node we
- * just stopped's data directory. */
- dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir}
- setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
- }
+ configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
+ 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })
Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
upgradedClusterTestRunner.configure {
systemProperty 'tests.rest.suite', 'upgraded_cluster'
- // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
- finalizedBy "${baseName}#mixedClusterTestCluster#stop"
+ /*
+ * Force stopping all the upgraded nodes after the test runner
+ * so they are alive during the test.
+ */
+ finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
+ finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
}
Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java
new file mode 100644
index 0000000000000..6f4453aa06cc9
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.upgrades;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.test.rest.yaml.ObjectPath;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.function.Predicate;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
+import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.notNullValue;
+
+public abstract class AbstractRollingTestCase extends ESRestTestCase {
+ protected enum ClusterType {
+ OLD,
+ MIXED,
+ UPGRADED;
+
+ public static ClusterType parse(String value) {
+ switch (value) {
+ case "old_cluster":
+ return OLD;
+ case "mixed_cluster":
+ return MIXED;
+ case "upgraded_cluster":
+ return UPGRADED;
+ default:
+ throw new AssertionError("unknown cluster type: " + value);
+ }
+ }
+ }
+
+ protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite"));
+
+ @Override
+ protected final boolean preserveIndicesUponCompletion() {
+ return true;
+ }
+
+ @Override
+ protected final boolean preserveReposUponCompletion() {
+ return true;
+ }
+
+ @Override
+ protected final Settings restClientSettings() {
+ return Settings.builder().put(super.restClientSettings())
+ // increase the timeout here to 90 seconds to handle long waits for a green
+ // cluster health. the waits for green need to be longer than a minute to
+ // account for delayed shards
+ .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
+ .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
+ .build();
+ }
+}
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
new file mode 100644
index 0000000000000..f1e01d24acff6
--- /dev/null
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.upgrades;
+
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Basic test that indexed documents survive the rolling restart. See
+ * {@link RecoveryIT} for much more in depth testing of the mechanism
+ * by which they survive.
+ */
+public class IndexingIT extends AbstractRollingTestCase {
+ public void testIndexing() throws IOException {
+ switch (CLUSTER_TYPE) {
+ case OLD:
+ break;
+ case MIXED:
+ Request waitForYellow = new Request("GET", "/_cluster/health");
+ waitForYellow.addParameter("wait_for_nodes", "3");
+ waitForYellow.addParameter("wait_for_status", "yellow");
+ client().performRequest(waitForYellow);
+ break;
+ case UPGRADED:
+ Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index");
+ waitForGreen.addParameter("wait_for_nodes", "3");
+ waitForGreen.addParameter("wait_for_status", "green");
+ // wait long enough to give delayed unassigned shards time to stop being delayed
+ waitForGreen.addParameter("timeout", "70s");
+ waitForGreen.addParameter("level", "shards");
+ client().performRequest(waitForGreen);
+ break;
+ default:
+ throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
+ }
+
+ if (CLUSTER_TYPE == ClusterType.OLD) {
+ Request createTestIndex = new Request("PUT", "/test_index");
+ createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}");
+ client().performRequest(createTestIndex);
+
+ String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}";
+ Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas");
+ createIndexWithReplicas.setJsonEntity(recoverQuickly);
+ client().performRequest(createIndexWithReplicas);
+
+ Request createEmptyIndex = new Request("PUT", "/empty_index");
+ // Ask for recovery to be quick
+ createEmptyIndex.setJsonEntity(recoverQuickly);
+ client().performRequest(createEmptyIndex);
+
+ bulk("test_index", "_OLD", 5);
+ bulk("index_with_replicas", "_OLD", 5);
+ }
+
+ int expectedCount;
+ switch (CLUSTER_TYPE) {
+ case OLD:
+ expectedCount = 5;
+ break;
+ case MIXED:
+ if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
+ expectedCount = 5;
+ } else {
+ expectedCount = 10;
+ }
+ break;
+ case UPGRADED:
+ expectedCount = 15;
+ break;
+ default:
+ throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
+ }
+
+ assertCount("test_index", expectedCount);
+ assertCount("index_with_replicas", 5);
+ assertCount("empty_index", 0);
+
+ if (CLUSTER_TYPE != ClusterType.OLD) {
+ bulk("test_index", "_" + CLUSTER_TYPE, 5);
+ Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted");
+ toBeDeleted.addParameter("refresh", "true");
+ toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}");
+ client().performRequest(toBeDeleted);
+ assertCount("test_index", expectedCount + 6);
+
+ Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted");
+ delete.addParameter("refresh", "true");
+ client().performRequest(delete);
+
+ assertCount("test_index", expectedCount + 5);
+ }
+ }
+
+ private void bulk(String index, String valueSuffix, int count) throws IOException {
+ StringBuilder b = new StringBuilder();
+ for (int i = 0; i < count; i++) {
+ b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n");
+ b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n");
+ }
+ Request bulk = new Request("POST", "/_bulk");
+ bulk.addParameter("refresh", "true");
+ bulk.setJsonEntity(b.toString());
+ client().performRequest(bulk);
+ }
+
+ private void assertCount(String index, int count) throws IOException {
+ Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
+ searchTestIndexRequest.addParameter("filter_path", "hits.total");
+ Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest);
+ assertEquals("{\"hits\":{\"total\":" + count + "}}",
+ EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8));
+ }
+}
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
index d208a7097a784..350636551d9ad 100644
--- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
@@ -46,53 +46,13 @@
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;
-public class RecoveryIT extends ESRestTestCase {
-
- @Override
- protected boolean preserveIndicesUponCompletion() {
- return true;
- }
-
- @Override
- protected boolean preserveReposUponCompletion() {
- return true;
- }
-
- private enum CLUSTER_TYPE {
- OLD,
- MIXED,
- UPGRADED;
-
- public static CLUSTER_TYPE parse(String value) {
- switch (value) {
- case "old_cluster":
- return OLD;
- case "mixed_cluster":
- return MIXED;
- case "upgraded_cluster":
- return UPGRADED;
- default:
- throw new AssertionError("unknown cluster type: " + value);
- }
- }
- }
-
- private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite"));
-
- @Override
- protected Settings restClientSettings() {
- return Settings.builder().put(super.restClientSettings())
- // increase the timeout here to 90 seconds to handle long waits for a green
- // cluster health. the waits for green need to be longer than a minute to
- // account for delayed shards
- .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
- .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
- .build();
- }
-
+/**
+ * In depth testing of the recovery mechanism during a rolling restart.
+ */
+public class RecoveryIT extends AbstractRollingTestCase {
public void testHistoryUUIDIsGenerated() throws Exception {
final String index = "index_history_uuid";
- if (clusterType == CLUSTER_TYPE.OLD) {
+ if (CLUSTER_TYPE == ClusterType.OLD) {
Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
@@ -102,7 +62,7 @@ public void testHistoryUUIDIsGenerated() throws Exception {
// before timing out
.put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms");
createIndex(index, settings.build());
- } else if (clusterType == CLUSTER_TYPE.UPGRADED) {
+ } else if (CLUSTER_TYPE == ClusterType.UPGRADED) {
ensureGreen(index);
Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards"));
assertOK(response);
@@ -157,11 +117,11 @@ public void testRecoveryWithConcurrentIndexing() throws Exception {
final Map nodeMap = objectPath.evaluate("nodes");
List nodes = new ArrayList<>(nodeMap.keySet());
- switch (clusterType) {
+ switch (CLUSTER_TYPE) {
case OLD:
Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
- .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+ .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
// if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@@ -181,6 +141,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception {
assertOK(client().performRequest("POST", index + "/_refresh"));
assertCount(index, "_only_nodes:" + nodes.get(0), 60);
assertCount(index, "_only_nodes:" + nodes.get(1), 60);
+ assertCount(index, "_only_nodes:" + nodes.get(2), 60);
// make sure that we can index while the replicas are recovering
updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries"));
break;
@@ -191,9 +152,10 @@ public void testRecoveryWithConcurrentIndexing() throws Exception {
assertOK(client().performRequest("POST", index + "/_refresh"));
assertCount(index, "_only_nodes:" + nodes.get(0), 110);
assertCount(index, "_only_nodes:" + nodes.get(1), 110);
+ assertCount(index, "_only_nodes:" + nodes.get(2), 110);
break;
default:
- throw new IllegalStateException("unknown type " + clusterType);
+ throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
}
}
@@ -221,11 +183,11 @@ private String getNodeId(Predicate versionPredicate) throws IOException
public void testRelocationWithConcurrentIndexing() throws Exception {
final String index = "relocation_with_concurrent_indexing";
- switch (clusterType) {
+ switch (CLUSTER_TYPE) {
case OLD:
Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
- .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+ .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
// if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@@ -258,7 +220,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception {
break;
case UPGRADED:
updateIndexSettings(index, Settings.builder()
- .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+ .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
.put("index.routing.allocation.include._id", (String)null)
);
asyncIndexDocs(index, 60, 50).get();
@@ -271,9 +233,10 @@ public void testRelocationWithConcurrentIndexing() throws Exception {
assertCount(index, "_only_nodes:" + nodes.get(0), 110);
assertCount(index, "_only_nodes:" + nodes.get(1), 110);
+ assertCount(index, "_only_nodes:" + nodes.get(2), 110);
break;
default:
- throw new IllegalStateException("unknown type " + clusterType);
+ throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
}
}
diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
index f3c0256b2c3c5..7932328c8c2f6 100644
--- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java
@@ -60,4 +60,3 @@ protected Settings restClientSettings() {
.build();
}
}
-
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml
index 0810341db1317..0cce81c8985cd 100644
--- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml
@@ -1,74 +1,8 @@
----
-"Index data and search on the mixed cluster":
- - do:
- cluster.health:
- wait_for_status: yellow
- wait_for_nodes: 2
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster
-
- - do:
- search:
- index: index_with_replicas
-
- - match: { hits.total: 5 } # just check we recovered fine
-
- - do:
- bulk:
- refresh: true
- body:
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v1_mixed", "f2": 5}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v2_mixed", "f2": 6}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v3_mixed", "f2": 7}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v4_mixed", "f2": 8}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v5_mixed", "f2": 9}'
-
- - do:
- index:
- index: test_index
- type: doc
- id: d10
- body: {"f1": "v6_mixed", "f2": 10}
-
- - do:
- indices.refresh:
- index: test_index
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster
-
- - do:
- delete:
- index: test_index
- type: doc
- id: d10
-
- - do:
- indices.refresh:
- index: test_index
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 10 }
-
---
"Verify that we can still find things with the template":
- do:
search_template:
+ index: test_search_template
body:
id: test_search_template
params:
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
index 067eba6e4b860..04d85eb607835 100644
--- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
@@ -1,76 +1,5 @@
---
-"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade":
- - do:
- indices.create:
- index: test_index
- body:
- settings:
- index:
- number_of_replicas: 0
- - do:
- indices.create:
- index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine
- body:
- # if the node with the replica is the first to be restarted, then delayed
- # allocation will kick in, and the cluster health won't return to GREEN
- # before timing out
- index.unassigned.node_left.delayed_timeout: "100ms"
-
- - do:
- indices.create:
- index: empty_index # index to ensure we can recover empty indices
- body:
- # if the node with the replica is the first to be restarted, then delayed
- # allocation will kick in, and the cluster health won't return to GREEN
- # before timing out
- index.unassigned.node_left.delayed_timeout: "100ms"
-
- - do:
- bulk:
- refresh: true
- body:
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v1_old", "f2": 0}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v2_old", "f2": 1}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v3_old", "f2": 2}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v4_old", "f2": 3}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v5_old", "f2": 4}'
-
- - do:
- bulk:
- refresh: true
- body:
- - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
- - '{"f1": "d_old"}'
- - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
- - '{"f1": "d_old"}'
- - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
- - '{"f1": "d_old"}'
- - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
- - '{"f1": "d_old"}'
- - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}'
- - '{"f1": "d_old"}'
-
- - do:
- indices.refresh:
- index: test_index,index_with_replicas
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 5 }
-
- - do:
- search:
- index: index_with_replicas
-
- - match: { hits.total: 5 }
-
+"Create things in the cluster state that we'll validate are there after the upgrade":
- do:
snapshot.create_repository:
repository: my_repo
@@ -91,6 +20,21 @@
}
- match: { "acknowledged": true }
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+ - '{"f1": "v1_old"}'
+ - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+ - '{"f1": "v2_old"}'
+ - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+ - '{"f1": "v3_old"}'
+ - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+ - '{"f1": "v4_old"}'
+ - '{"index": {"_index": "test_search_template", "_type": "doc"}}'
+ - '{"f1": "v5_old"}'
+
- do:
put_script:
id: test_search_template
@@ -105,6 +49,7 @@
- do:
search_template:
+ index: test_search_template
body:
id: test_search_template
params:
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml
index 011db854ecdc6..3e293f91ce12a 100644
--- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml
@@ -1,55 +1,8 @@
----
-"Index data and search on the upgraded cluster":
- - do:
- cluster.health:
- wait_for_status: green
- wait_for_nodes: 2
- # wait for long enough that we give delayed unassigned shards to stop being delayed
- timeout: 70s
- level: shards
- index: test_index,index_with_replicas,empty_index
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters
-
- - do:
- search:
- index: index_with_replicas
-
- - match: { hits.total: 5 } # just check we recovered fine
-
- - do:
- bulk:
- refresh: true
- body:
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v1_upgraded", "f2": 10}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v2_upgraded", "f2": 11}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v3_upgraded", "f2": 12}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v4_upgraded", "f2": 13}'
- - '{"index": {"_index": "test_index", "_type": "doc"}}'
- - '{"f1": "v5_upgraded", "f2": 14}'
-
- - do:
- indices.refresh:
- index: test_index
-
- - do:
- search:
- index: test_index
-
- - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs
-
---
"Verify that we can still find things with the template":
- do:
search_template:
+ index: test_search_template
body:
id: test_search_template
params: