From e0340d3b8b758752bd7e109ba697229e841f5e2f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 12:01:14 -0800 Subject: [PATCH 01/38] Convert full cluster restart tests to new rest testing framework --- .../main/groovy/elasticsearch.bwc-test.gradle | 14 +++ .../InternalTestArtifactExtension.java | 2 +- .../test/rest/RestTestBasePlugin.java | 74 ++++++++++-- qa/full-cluster-restart/build.gradle | 62 ++--------- .../FullClustRestartUpgradeStatus.java | 14 +++ .../upgrades/FullClusterRestartIT.java | 49 +++++++- .../FullClusterRestartTestOrdering.java | 24 ++++ ...rameterizedFullClusterRestartTestCase.java | 90 +++++++++++++++ .../upgrades/QueryBuilderBWCIT.java | 29 ++++- .../test/cluster/ClusterHandle.java | 17 +++ .../test/cluster/ElasticsearchCluster.java | 3 + .../local/AbstractLocalSpecBuilder.java | 23 ++++ .../local/DefaultLocalClusterSpecBuilder.java | 34 +++++- .../cluster/local/LocalClusterFactory.java | 62 +++++++++-- .../cluster/local/LocalClusterHandle.java | 49 ++++---- .../test/cluster/local/LocalClusterSpec.java | 13 ++- .../local/LocalClusterSpecBuilder.java | 11 ++ .../local/LocalElasticsearchCluster.java | 30 ++++- .../test/cluster/local/LocalSpecBuilder.java | 11 ++ .../LocalDistributionResolver.java | 3 + .../ReleasedDistributionResolver.java | 54 +++++++++ .../SnapshotDistributionResolver.java | 30 ++++- .../qa/full-cluster-restart/build.gradle | 99 ++--------------- .../xpack/restart/FullClusterRestartIT.java | 46 +++++++- .../resources/system_key | 0 x-pack/qa/full-cluster-restart/build.gradle | 105 ++---------------- ...stractXpackFullClusterRestartTestCase.java | 49 ++++++++ .../restart/CoreFullClusterRestartIT.java | 46 ++++++++ .../xpack/restart/FullClusterRestartIT.java | 10 +- ...MLModelDeploymentFullClusterRestartIT.java | 10 +- ...nfigIndexMappingsFullClusterRestartIT.java | 10 +- .../MlHiddenIndicesFullClusterRestartIT.java | 10 +- .../MlMigrationFullClusterRestartIT.java | 10 +- 
.../xpack/restart/QueryBuilderBWCIT.java | 42 +++++++ .../xpack/restart/WatcherMappingUpdateIT.java | 10 +- .../xpack/restart/funny-timeout-watch.json | 0 .../xpack/restart/logging-watch.json | 0 .../xpack/restart/simple-watch.json | 0 .../xpack/restart/throttle-period-watch.json | 0 .../resources/system_key | 0 .../restart/CoreFullClusterRestartIT.java | 24 ---- .../xpack/restart/QueryBuilderBWCIT.java | 22 ---- 42 files changed, 830 insertions(+), 361 deletions(-) create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/FullClusterRestartIT.java (97%) create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java (92%) create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (66%) rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (99%) rename x-pack/qa/full-cluster-restart/src/{test => 
javaRestTest}/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java (97%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java (94%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java (96%) create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java (92%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/logging-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/simple-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index b80c450c5914e..a5e74c3721297 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ 
b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,6 +9,8 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask ext.bwcTaskName = { Version version -> return "v${version}#bwcTest" @@ -36,5 +38,17 @@ plugins.withType(ElasticsearchTestBasePlugin) { } } +plugins.withType(InternalJavaRestTestPlugin) { + tasks.named("javaRestTest") { + enabled = false + } + + tasks.withType(StandaloneRestIntegTestTask).configureEach { + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + usesDefaultDistribution() + } +} + tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java index fae845b229651..4952085f466be 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java @@ -32,7 +32,7 @@ public void registerTestArtifactFromSourceSet(SourceSet sourceSet) { JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class); javaPluginExtension.registerFeature(name + "Artifacts", featureSpec -> { featureSpec.usingSourceSet(sourceSet); - featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-" + name + "-artifacts", "1.0"); + featureSpec.capability("org.elasticsearch.gradle", project.getName() + 
"-test-artifacts", "1.0"); // This feature is only used internally in the // elasticsearch build so we do not need any publication. featureSpec.disablePublication(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 9baa17bc00d7c..1a7b5bc3ee2a1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -13,6 +13,8 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; +import org.elasticsearch.gradle.ElasticsearchDistributionType; +import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; @@ -58,6 +60,8 @@ public class RestTestBasePlugin implements Plugin { private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java"; private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution"; private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution"; + private static final String BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private static final String BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; private static final String TESTS_CLUSTER_MODULES_PATH_SYSPROP = "tests.cluster.modules.path"; private static final String TESTS_CLUSTER_PLUGINS_PATH_SYSPROP = "tests.cluster.plugins.path"; private static final String DEFAULT_REST_INTEG_TEST_DISTRO = "default_distro"; @@ -79,16 +83,17 @@ public void apply(Project project) { 
project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); // Register integ-test and default distributions - NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); - ElasticsearchDistribution defaultDistro = distributions.create(DEFAULT_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - }); - ElasticsearchDistribution integTestDistro = distributions.create(INTEG_TEST_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - distro.setType(ElasticsearchDistributionTypes.INTEG_TEST_ZIP); - }); + ElasticsearchDistribution defaultDistro = createDistribution( + project, + DEFAULT_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch() + ); + ElasticsearchDistribution integTestDistro = createDistribution( + project, + INTEG_TEST_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch(), + ElasticsearchDistributionTypes.INTEG_TEST_ZIP + ); // Create configures for module and plugin dependencies Configuration modulesConfiguration = createPluginConfiguration(project, MODULES_CONFIGURATION, true, false); @@ -151,6 +156,35 @@ public Void call(Object... args) { return null; } }); + + // Add `usesBwcDistribution(version)` extension method to test tasks to indicate they require a BWC distribution + task.getExtensions().getExtraProperties().set("usesBwcDistribution", new Closure(task) { + @Override + public Void call(Object... 
args) { + if (args.length != 1 || args[0] instanceof Version == false) { + throw new IllegalArgumentException("Expected exactly one argument of type org.elasticsearch.gradle.Version"); + } + + Version version = (Version) args[0]; + boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + String versionString = version.toString(); + ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); + + task.dependsOn(bwcDistro); + registerDistributionInputs(task, bwcDistro); + + nonInputSystemProperties.systemProperty( + (isReleased ? BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX : BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX) + versionString, + providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) + ); + + if (version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + // If we are upgrade testing older versions we also need to upgrade to 7.last + this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + } + return null; + } + }); }); project.getTasks() @@ -158,6 +192,26 @@ public Void call(Object... 
args) { .configure(check -> check.dependsOn(project.getTasks().withType(StandaloneRestIntegTestTask.class))); } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { + return createDistribution(project, name, version, null); + } + + private ElasticsearchDistribution createDistribution(Project project, String name, String version, ElasticsearchDistributionType type) { + NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); + ElasticsearchDistribution maybeDistro = distributions.findByName(name); + if (maybeDistro == null) { + return distributions.create(name, distro -> { + distro.setVersion(version); + distro.setArchitecture(Architecture.current()); + if (type != null) { + distro.setType(type); + } + }); + } else { + return maybeDistro; + } + } + private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Action patternFilter) { return distribution.getExtracted().getAsFileTree().matching(patternFilter); } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index a3af45b43363e..b6f181809e0e4 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -6,64 +6,20 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.internal-test-artifact' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } +testArtifacts { + registerTestArtifactFromSourceSet(sourceSets.javaRestTest) +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#upgradedClusterTest") +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java new file mode 100644 index 0000000000000..dda196ddafc20 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +public enum FullClustRestartUpgradeStatus { + OLD, + UPGRADED +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java similarity index 97% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index af66fbc61562b..e250a945aa903 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; @@ -28,6 +30,10 @@ import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.transport.Compression; @@ -35,6 +41,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; import java.io.IOException; 
import java.util.ArrayList; @@ -44,7 +54,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -80,13 +89,41 @@ * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. */ -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + private static TemporaryFolder repoDirectory = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .apply(() -> clusterConfig) + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); private String index; + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + @Before public void setIndex() { - index = getTestName().toLowerCase(Locale.ROOT); + index = getRootTestName(); } public void testSearch() throws Exception { @@ -1051,7 +1088,7 @@ public void testSnapshotRestore() throws IOException { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); 
} @@ -1725,7 +1762,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } @@ -1785,7 +1822,7 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java new file mode 100644 index 0000000000000..9f5c57346b945 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; + +import java.util.Comparator; + +public class FullClusterRestartTestOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + return Integer.compare(getOrdinal(o1), getOrdinal(o2)); + } + + private int getOrdinal(TestMethodAndParams t) { + return ((FullClustRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..29e1ae3e92255 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.UPGRADED; + +@TestCaseOrdering(FullClusterRestartTestOrdering.class) +public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static boolean upgradeFailed = false; + private final FullClustRestartUpgradeStatus requestedUpgradeStatus; + + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + this.requestedUpgradeStatus = upgradeStatus; + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return Arrays.stream(FullClustRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + } + + @Before + public void maybeUpgrade() throws Exception { + if (getUpgradeCluster().getVersion().equals(OLD_CLUSTER_VERSION) && requestedUpgradeStatus == UPGRADED) { + try { + if (OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { + // First upgrade to latest wire compatible version + getUpgradeCluster().upgradeToVersion(MINIMUM_WIRE_COMPATIBLE_VERSION); + } + getUpgradeCluster().upgradeToVersion(Version.CURRENT); + closeClients(); + initClient(); + } catch (Exception e) { + 
upgradeFailed = true; + throw e; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + public boolean isRunningAgainstOldCluster() { + return requestedUpgradeStatus == OLD; + } + + public static org.elasticsearch.Version getOldClusterVersion() { + return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + public static Version getOldClusterTestVersion() { + return Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + protected abstract ElasticsearchCluster getUpgradeCluster(); + + @Override + protected String getTestRestCluster() { + return getUpgradeCluster().getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected String getRootTestName() { + return getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java similarity index 92% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index d69f0b05958f9..91607dec6f721 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -32,7 +34,11 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import 
org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.ClassRule; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -54,10 +60,29 @@ * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ -public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { - +public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("xpack.security.enabled", "false") + .apply(() -> clusterConfig) + .build(); + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public QueryBuilderBWCIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + static { addCandidate(""" "match": { "text_field": "value"} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java index 658925744860d..2a4e3e3958c57 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java @@ -8,6 +8,8 @@ package org.elasticsearch.test.cluster; +import org.elasticsearch.test.cluster.util.Version; + import java.io.Closeable; /** @@ -73,4 
+75,19 @@ public interface ClusterHandle extends Closeable { * @return cluster node TCP transport endpoints */ String getTransportEndpoint(int index); + + /** + * Upgrades a single node to the given version. Method blocks until the node is back up and ready to respond to requests. + * + * @param index index of node ot upgrade + * @param version version to upgrade to + */ + void upgradeNodeToVersion(int index, Version version); + + /** + * Performs a "full cluster restart" upgrade to the given version. Method blocks until the cluster is restarted and available. + * + * @param version version to upgrade to + */ + void upgradeToVersion(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java index 02eb3fb73df63..3bc4efaeb032f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java @@ -10,6 +10,7 @@ import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; import org.elasticsearch.test.cluster.local.LocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.util.Version; import org.junit.rules.TestRule; /** @@ -32,4 +33,6 @@ static LocalClusterSpecBuilder local() { return new DefaultLocalClusterSpecBuilder(); } + Version getVersion(); + } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 7e6fede6b84aa..2ca00e27435c1 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.ArrayList; @@ -32,8 +33,10 @@ public abstract class AbstractLocalSpecBuilder> im private final Set plugins = new HashSet<>(); private final Set features = new HashSet<>(); private final Map keystoreSettings = new HashMap<>(); + private final Map keystoreFiles = new HashMap<>(); private final Map extraConfigFiles = new HashMap<>(); private DistributionType distributionType; + private Version version; protected AbstractLocalSpecBuilder(AbstractLocalSpecBuilder parent) { this.parent = parent; @@ -136,6 +139,16 @@ public Map getKeystoreSettings() { return inherit(() -> parent.getKeystoreSettings(), keystoreSettings); } + @Override + public T keystore(String key, Resource file) { + this.keystoreFiles.put(key, file); + return cast(this); + } + + public Map getKeystoreFiles() { + return inherit(() -> parent.getKeystoreFiles(), keystoreFiles); + } + @Override public T configFile(String fileName, Resource configFile) { this.extraConfigFiles.put(fileName, configFile); @@ -146,6 +159,16 @@ public Map getExtraConfigFiles() { return inherit(() -> parent.getExtraConfigFiles(), extraConfigFiles); } + @Override + public T version(Version version) { + this.version = version; + return cast(this); + } + + public Version getVersion() { + return inherit(() -> parent.getVersion(), version); + } + private List inherit(Supplier> parent, List child) { List combinedList = new ArrayList<>(); if (this.parent != null) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 7e4011ca9481c..8d8ae010c552f 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -19,12 +19,14 @@ import java.util.List; import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Supplier; public class DefaultLocalClusterSpecBuilder extends AbstractLocalSpecBuilder implements LocalClusterSpecBuilder { private String name = "test-cluster"; private final List nodeBuilders = new ArrayList<>(); private final List users = new ArrayList<>(); private final List roleFiles = new ArrayList<>(); + private final List> lazyConfigProviders = new ArrayList<>(); public DefaultLocalClusterSpecBuilder() { super(null); @@ -45,6 +47,12 @@ public DefaultLocalClusterSpecBuilder apply(LocalClusterConfigProvider configPro return this; } + @Override + public LocalClusterSpecBuilder apply(Supplier configProvider) { + lazyConfigProviders.add(configProvider); + return this; + } + @Override public DefaultLocalClusterSpecBuilder nodes(int nodes) { if (nodes < nodeBuilders.size()) { @@ -116,7 +124,28 @@ public ElasticsearchCluster build() { clusterSpec.setNodes(nodeSpecs); clusterSpec.validate(); - return new LocalElasticsearchCluster(clusterSpec); + return new LocalElasticsearchCluster(this); + } + + LocalClusterSpec buildClusterSpec() { + // Apply lazily provided configuration + lazyConfigProviders.forEach(s -> s.get().apply(this)); + + List clusterUsers = users.isEmpty() ? 
List.of(User.DEFAULT_USER) : users; + LocalClusterSpec clusterSpec = new LocalClusterSpec(name, clusterUsers, roleFiles); + List nodeSpecs; + + if (nodeBuilders.isEmpty()) { + // No node-specific configuration so assume a single-node cluster + nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec)); + } else { + nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec)).toList(); + } + + clusterSpec.setNodes(nodeSpecs); + clusterSpec.validate(); + + return clusterSpec; } public static class DefaultLocalNodeSpecBuilder extends AbstractLocalSpecBuilder implements LocalNodeSpecBuilder { @@ -137,7 +166,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Version.CURRENT, + Optional.of(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), getEnvironmentProviders(), @@ -147,6 +176,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { Optional.ofNullable(getDistributionType()).orElse(DistributionType.INTEG_TEST), getFeatures(), getKeystoreSettings(), + getKeystoreFiles(), getExtraConfigFiles() ); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index d85f008727176..92112ca9559d9 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -81,7 +81,7 @@ public class Node { private final Path configDir; private final Path tempDir; - private boolean initialized = false; + private Version currentVersion; private Process process = null; private DistributionDescriptor distributionDescriptor; @@ -96,19 +96,22 @@ public Node(LocalNodeSpec spec) { this.tempDir = workingDir.resolve("tmp"); // elasticsearch temporary directory } - public synchronized void start() { 
+ public synchronized void start(Version version) { LOGGER.info("Starting Elasticsearch node '{}'", spec.getName()); + if (version != null) { + spec.setVersion(version); + } - if (initialized == false) { + if (currentVersion == null || currentVersion.equals(spec.getVersion()) == false) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); - initializeWorkingDirectory(); + initializeWorkingDirectory(currentVersion != null); installPlugins(); - if (spec.getDistributionType() == DistributionType.INTEG_TEST) { + if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { installModules(); } - initialized = true; + currentVersion = spec.getVersion(); } try { @@ -120,6 +123,7 @@ public synchronized void start() { writeConfiguration(); createKeystore(); addKeystoreSettings(); + addKeystoreFiles(); configureSecurity(); copyExtraConfigFiles(); @@ -155,6 +159,20 @@ public String getTransportEndpoint() { return readPortsFile(portsFile).get(0); } + public void deletePortsFiles() { + try { + Path hostsFile = workingDir.resolve("config").resolve("unicast_hosts.txt"); + Path httpPortsFile = workingDir.resolve("logs").resolve("http.ports"); + Path transportPortsFile = workingDir.resolve("logs").resolve("transport.ports"); + + Files.deleteIfExists(hostsFile); + Files.deleteIfExists(httpPortsFile); + Files.deleteIfExists(transportPortsFile); + } catch (IOException e) { + throw new UncheckedIOException("Failed to write unicast_hosts for: " + this, e); + } + } + public LocalNodeSpec getSpec() { return spec; } @@ -192,9 +210,13 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory() { + private void initializeWorkingDirectory(boolean preserverWorkingDirectory) { try { - IOUtils.deleteWithRetry(workingDir); + if (preserverWorkingDirectory == false) { + 
IOUtils.deleteWithRetry(workingDir); + } else { + IOUtils.deleteWithRetry(distributionDir); + } try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); } catch (IOUtils.LinkCreationException e) { @@ -310,6 +332,30 @@ private void addKeystoreSettings() { }); } + private void addKeystoreFiles() { + spec.getKeystoreFiles().forEach((key, file) -> { + try { + Path path = Files.createTempFile(tempDir, key, null); + file.writeTo(path); + + ProcessUtils.exec( + workingDir, + OS.conditional( + c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) + .onUnix(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore")) + ), + getEnvironmentVariables(), + false, + "add-file", + key, + path.toString() + ).waitFor(); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } + }); + } + private void configureSecurity() { if (spec.isSecurityEnabled()) { if (spec.getUsers().isEmpty() == false) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 878b017e3cd62..62ba9113d47c1 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -15,6 +15,7 @@ import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; import org.elasticsearch.test.cluster.util.Retry; +import org.elasticsearch.test.cluster.util.Version; import java.io.IOException; import java.io.UncheckedIOException; @@ -66,7 +67,7 @@ public LocalClusterHandle(String name, List nodes) { public void start() { if (started.getAndSet(true) == false) { LOGGER.info("Starting Elasticsearch test cluster '{}'", name); - execute(() -> 
nodes.parallelStream().forEach(Node::start)); + execute(() -> nodes.parallelStream().forEach(n -> n.start(null))); } waitUntilReady(); } @@ -75,11 +76,11 @@ public void start() { public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); - execute(() -> nodes.forEach(n -> n.stop(forcibly))); - deletePortFiles(); + execute(() -> nodes.parallelStream().forEach(n -> n.stop(forcibly))); + execute(() -> nodes.parallelStream().forEach(Node::deletePortsFiles)); } else { // Make sure the process is stopped, otherwise wait - execute(() -> nodes.forEach(n -> n.waitForExit())); + execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); } } @@ -128,6 +129,26 @@ public String getTransportEndpoint(int index) { return getTransportEndpoints().split(",")[index]; } + @Override + public void upgradeNodeToVersion(int index, Version version) { + Node node = nodes.get(index); + node.stop(false); + LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); + node.deletePortsFiles(); + node.start(version); + waitUntilReady(); + } + + @Override + public void upgradeToVersion(Version version) { + stop(false); + if (started.getAndSet(true) == false) { + LOGGER.info("Upgrading Elasticsearch test cluster '{}' to version {}", name, version); + execute(() -> nodes.parallelStream().forEach(n -> n.start(version))); + } + waitUntilReady(); + } + private void waitUntilReady() { writeUnicastHostsFile(); try { @@ -191,7 +212,7 @@ private boolean isSecurityAutoConfigured(Node node) { private void writeUnicastHostsFile() { String transportUris = execute(() -> nodes.parallelStream().map(Node::getTransportEndpoint).collect(Collectors.joining("\n"))); - nodes.forEach(node -> { + execute(() -> nodes.parallelStream().forEach(node -> { try { Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); if (Files.notExists(hostsFile)) { @@ -200,23 +221,7 @@ 
private void writeUnicastHostsFile() { } catch (IOException e) { throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); } - }); - } - - private void deletePortFiles() { - nodes.forEach(node -> { - try { - Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); - Path httpPortsFile = node.getWorkingDir().resolve("logs").resolve("http.ports"); - Path tranportPortsFile = node.getWorkingDir().resolve("logs").resolve("transport.ports"); - - Files.deleteIfExists(hostsFile); - Files.deleteIfExists(httpPortsFile); - Files.deleteIfExists(tranportPortsFile); - } catch (IOException e) { - throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); - } - }); + })); } private T execute(Callable task) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index da409a4935abe..52ff95920b4fd 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -69,7 +69,6 @@ void validate() { public static class LocalNodeSpec { private final LocalClusterSpec cluster; private final String name; - private final Version version; private final List settingsProviders; private final Map settings; private final List environmentProviders; @@ -79,7 +78,9 @@ public static class LocalNodeSpec { private final DistributionType distributionType; private final Set features; private final Map keystoreSettings; + private final Map keystoreFiles; private final Map extraConfigFiles; + private Version version; public LocalNodeSpec( LocalClusterSpec cluster, @@ -94,6 +95,7 @@ public LocalNodeSpec( DistributionType distributionType, Set features, Map keystoreSettings, + Map keystoreFiles, Map extraConfigFiles ) { this.cluster = cluster; @@ -108,9 
+110,14 @@ public LocalNodeSpec( this.distributionType = distributionType; this.features = features; this.keystoreSettings = keystoreSettings; + this.keystoreFiles = keystoreFiles; this.extraConfigFiles = extraConfigFiles; } + void setVersion(Version version) { + this.version = version; + } + public LocalClusterSpec getCluster() { return cluster; } @@ -151,6 +158,10 @@ public Map getKeystoreSettings() { return keystoreSettings; } + public Map getKeystoreFiles() { + return keystoreFiles; + } + public Map getExtraConfigFiles() { return extraConfigFiles; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java index c07a491d2ace6..1f4086fd47fe8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Consumer; +import java.util.function.Supplier; public interface LocalClusterSpecBuilder extends LocalSpecBuilder { /** @@ -19,8 +20,18 @@ public interface LocalClusterSpecBuilder extends LocalSpecBuilder configProvider); + /** * Sets the number of nodes for the cluster. 
*/ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index 54d541cd07144..dc532dfd956bb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -10,18 +10,21 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.cluster.util.Version; import org.junit.runner.Description; import org.junit.runners.model.Statement; import java.nio.file.Path; public class LocalElasticsearchCluster implements ElasticsearchCluster { - private final LocalClusterSpec spec; + private final DefaultLocalClusterSpecBuilder builder; + private LocalClusterSpec spec; private LocalClusterHandle handle; - public LocalElasticsearchCluster(LocalClusterSpec spec) { - this.spec = spec; + public LocalElasticsearchCluster(DefaultLocalClusterSpecBuilder builder) { + this.builder = builder; } @Override @@ -30,9 +33,10 @@ public Statement apply(Statement base, Description description) { @Override public void evaluate() throws Throwable { try { + spec = builder.buildClusterSpec(); handle = new LocalClusterFactory( Path.of(System.getProperty("java.io.tmpdir")).resolve(description.getDisplayName()).toAbsolutePath(), - new LocalDistributionResolver(new SnapshotDistributionResolver()) + new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) ).create(spec); handle.start(); base.evaluate(); @@ -97,6 +101,24 @@ public String getTransportEndpoint(int 
index) { return handle.getTransportEndpoint(index); } + @Override + public void upgradeNodeToVersion(int index, Version version) { + checkHandle(); + handle.upgradeNodeToVersion(index, version); + } + + @Override + public void upgradeToVersion(Version version) { + checkHandle(); + handle.upgradeToVersion(version); + } + + @Override + public Version getVersion() { + checkHandle(); + return spec.getNodes().get(0).getVersion(); + } + private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. Did you forget the @ClassRule annotation?"); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index 0b73c0737c440..2bd8831521ddb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Supplier; @@ -68,8 +69,18 @@ interface LocalSpecBuilder> { */ T keystore(String key, String value); + /** + * Adds a secure file to the node keystore. + */ + T keystore(String key, Resource file); + /** * Adds a file to the node config directory */ T configFile(String fileName, Resource configFile); + + /** + * Sets the version of Elasticsearch. Defaults to {@link Version#CURRENT}. 
+ */ + T version(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java index 5c9f45cbe092f..b9442b28e1591 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java @@ -13,6 +13,9 @@ import java.nio.file.Files; import java.nio.file.Path; +/** + * A {@link DistributionResolver} for resolving locally built distributions for the current version of Elasticsearch. + */ public class LocalDistributionResolver implements DistributionResolver { private final DistributionResolver delegate; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java new file mode 100644 index 0000000000000..12654be310ef8 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.cluster.local.distribution; + +import org.elasticsearch.test.cluster.util.Version; + +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving previously released distributions of Elasticsearch. 
+ */ +public class ReleasedDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; + + @Override + public DistributionDescriptor resolve(Version version, DistributionType type) { + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath == null) { + String taskPath = System.getProperty("tests.task"); + String project = taskPath.substring(0, taskPath.lastIndexOf(':')); + String taskName = taskPath.substring(taskPath.lastIndexOf(':') + 1); + + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Ensure you've added the following to the build script for project '" + + project + + "':\n\n" + + "tasks.named('" + + taskName + + "') {\n" + + " usesBwcDistribution(" + + version + + ")\n" + + "}" + ); + } + + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." 
+ ); + } + + return new DefaultDistributionDescriptor(version, false, distributionDir, DistributionType.DEFAULT); + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java index 182dbe66a584d..c6cecf09e9b9d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java @@ -10,10 +10,36 @@ import org.elasticsearch.test.cluster.util.Version; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving snapshot versions of Elasticsearch for previous, backwards-compatible versions. + */ public class SnapshotDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private final DistributionResolver delegate; + + public SnapshotDistributionResolver(DistributionResolver delegate) { + this.delegate = delegate; + } + @Override public DistributionDescriptor resolve(Version version, DistributionType type) { - // Not yet implemented - throw new UnsupportedOperationException("Cannot resolve distribution for version " + version); + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath != null) { + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." 
+ ); + } + + // Snapshot distributions are never release builds and always use the default distribution + return new DefaultDistributionDescriptor(version, true, distributionDir, DistributionType.DEFAULT); + } + + return delegate.resolve(version, type); } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle index 429b29bbc9fdb..d9539bb668b4b 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle @@ -1,104 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', 
file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 66% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index df6e3ed6b9388..69684377df662 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,10 +15,16 @@ import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -33,7 +41,37 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + 
.configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } @Override protected Settings restClientSettings() { @@ -64,7 +102,7 @@ public void testNodeShutdown() throws Exception { // Use the types available from as early as possible final String type = randomFrom("restart", "remove"); putBody.field("type", type); - putBody.field("reason", this.getTestName()); + putBody.field("reason", getRootTestName()); } putBody.endObject(); putShutdownRequest.setJsonEntity(Strings.toString(putBody)); @@ -81,7 +119,7 @@ public void testNodeShutdown() throws Exception { assertThat("there should be exactly one shutdown registered", shutdowns, hasSize(1)); final Map shutdown = shutdowns.get(0); assertThat(shutdown.get("node_id"), notNullValue()); // Since we randomly determine the node ID, we can't check it - assertThat(shutdown.get("reason"), equalTo(this.getTestName())); + assertThat(shutdown.get("reason"), equalTo(getRootTestName())); assertThat( (String) shutdown.get("status"), anyOf( diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 
3923d439d394d..d9539bb668b4b 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -1,110 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. // Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), 
BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - 
baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. - baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..10486c914d470 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; +import org.junit.ClassRule; + +public abstract class AbstractXpackFullClusterRestartTestCase extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public AbstractXpackFullClusterRestartTestCase(FullClustRestartUpgradeStatus upgradeStatus) { + 
super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } +} diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java new file mode 100644 index 0000000000000..cc202cd71569d --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.restart; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartIT; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class CoreFullClusterRestartIT extends FullClusterRestartIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", 
Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public CoreFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 99% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 42c551b16655b..b15f6a08ef2c4 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; @@ -25,7 +27,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; 
import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -61,11 +63,15 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java similarity index 97% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index a2b61b55a2975..00e253c43cbc8 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -15,7 +17,7 @@ import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -32,7 +34,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created static final String BASE_64_ENCODED_MODEL = @@ -63,6 +65,10 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRe RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } + public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Before public void setLogging() throws IOException { Request loggingSettings = new Request("PUT", "_cluster/settings"); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java similarity index 94% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index bfc078ffe9206..da3a00574f4a3 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -29,11 +31,15 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; + public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java similarity index 96% rename 
from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index ff15b6a428182..509f1b1cf72cc 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -16,7 +18,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -38,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String JOB_ID = "ml-hidden-indices-old-cluster-job"; private static final List, String>> EXPECTED_INDEX_ALIAS_PAIRS = List.of( @@ -49,6 +51,10 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRest Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); + public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") 
FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 61ce6f7827e2a..726e3ab559818 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -17,7 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -35,13 +37,17 @@ import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.is; -public 
class MlMigrationFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_OPEN_JOB_ID = "migration-old-cluster-open-job"; private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = "migration-old-cluster-started-datafeed"; private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; + public MlMigrationFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java new file mode 100644 index 0000000000000..5a2268626864e --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public QueryBuilderBWCIT(FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java similarity index 92% rename from 
x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index ea926e964360d..e6f8a89f08c1b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -14,7 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -23,7 +25,11 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; -public class WatcherMappingUpdateIT extends AbstractFullClusterRestartTestCase { +public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { + + public WatcherMappingUpdateIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json rename to 
x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git 
a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java deleted file mode 100644 index e06cb12f747a7..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClusterRestartIT; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class CoreFullClusterRestartIT extends FullClusterRestartIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java deleted file mode 100644 index cffc6881df645..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } -} From 0f64f844c72d0b9e7a3b07069ed73cddd6011aa2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 12:10:39 -0800 Subject: [PATCH 02/38] Fix NPE --- .../test/cluster/local/DefaultLocalClusterSpecBuilder.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 8d8ae010c552f..271b0d3b5e36d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -166,7 +166,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Optional.of(getVersion()).orElse(Version.CURRENT), + Optional.ofNullable(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), getEnvironmentProviders(), From 4b774537310528a6ca8b42a478b7509999434c56 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:02:18 -0800 Subject: [PATCH 03/38] Wait for cluster ready --- .../test/cluster/local/LocalClusterHandle.java | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 62ba9113d47c1..dfea1846bb365 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -22,6 +22,7 @@ import java.net.MalformedURLException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.GeneralSecurityException; import java.time.Duration; import java.util.List; import java.util.concurrent.Callable; @@ -152,13 +153,9 @@ public void upgradeToVersion(Version version) { private void waitUntilReady() { writeUnicastHostsFile(); try { - Retry.retryUntilTrue(CLUSTER_UP_TIMEOUT, Duration.ZERO, () -> { - WaitForHttpResource wait = configureWaitForReady(); - return wait.wait(500); - }); - } catch (TimeoutException e) { - throw new RuntimeException("Timed out after " + CLUSTER_UP_TIMEOUT + " waiting for cluster '" + name + "' status to be yellow"); - } catch (ExecutionException e) { + WaitForHttpResource wait = configureWaitForReady(); + wait.wait(CLUSTER_UP_TIMEOUT.toMillis()); + } catch (Exception e) { throw new RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } } From b6b5b6fe8fa26db51bb814d8c6f263fa900b5d81 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:04:10 -0800 Subject: [PATCH 04/38] Avoid clashing with Object#wait --- .../elasticsearch/test/cluster/local/LocalClusterHandle.java | 5 +---- .../test/cluster/local/WaitForHttpResource.java | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index dfea1846bb365..5d851d61f49e8 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -14,7 +14,6 @@ import org.elasticsearch.test.cluster.local.LocalClusterFactory.Node; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; -import org.elasticsearch.test.cluster.util.Retry; import org.elasticsearch.test.cluster.util.Version; import java.io.IOException; @@ -22,7 +21,6 @@ import java.net.MalformedURLException; import java.nio.file.Files; import java.nio.file.Path; -import java.security.GeneralSecurityException; import java.time.Duration; import java.util.List; import java.util.concurrent.Callable; @@ -30,7 +28,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinWorkerThread; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -154,7 +151,7 @@ private void waitUntilReady() { writeUnicastHostsFile(); try { WaitForHttpResource wait = configureWaitForReady(); - wait.wait(CLUSTER_UP_TIMEOUT.toMillis()); + wait.waitFor(CLUSTER_UP_TIMEOUT.toMillis()); } catch (Exception e) { throw new RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java index edab2cdf1e7e9..f00e6f13cb314 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java @@ -90,7 +90,7 @@ public void setPassword(String password) { this.password = password; } - 
public boolean wait(int durationInMs) throws GeneralSecurityException, InterruptedException, IOException { + public boolean waitFor(long durationInMs) throws GeneralSecurityException, InterruptedException, IOException { final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); final long sleep = Long.max(durationInMs / 10, 100); From 665227517319d1f8e65f27806c5d504c516e5c94 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:19:30 -0800 Subject: [PATCH 05/38] Skip test when non-applicable --- .../elasticsearch/xpack/restart/FullClusterRestartIT.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 69684377df662..e26e919913f6e 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.BeforeClass; import org.junit.ClassRule; import java.io.IOException; @@ -85,10 +86,13 @@ protected Settings restClientSettings() { .build(); } - @SuppressWarnings("unchecked") - public void testNodeShutdown() throws Exception { + @BeforeClass + public static void checkClusterVersion() { assumeTrue("no shutdown in versions before " + Version.V_7_15_0, getOldClusterVersion().onOrAfter(Version.V_7_15_0)); + } + @SuppressWarnings("unchecked") + public void testNodeShutdown() throws Exception { if (isRunningAgainstOldCluster()) { final 
Request getNodesReq = new Request("GET", "_nodes"); final Response getNodesResp = adminClient().performRequest(getNodesReq); From d1240a275ed7e19edf927a004409d714c1163669 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:59:55 -0800 Subject: [PATCH 06/38] Disable parallel test execution temporarily --- .../gradle/internal/test/rest/RestTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 1a7b5bc3ee2a1..fa47e29b555d7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -120,7 +120,7 @@ public void apply(Project project) { registerDistributionInputs(task, integTestDistro); // Enable parallel execution for these tests since each test gets its own cluster - task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); + // task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); // Disable test failure reporting since this stuff is now captured in build scans task.getExtensions().getExtraProperties().set("dumpOutputOnFailure", false); From 45eadbb344b051f03eefbddaacd3fe8720263f4a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 15:01:05 -0800 Subject: [PATCH 07/38] Revert "Disable parallel test execution temporarily" This reverts commit d1240a275ed7e19edf927a004409d714c1163669. 
--- .../gradle/internal/test/rest/RestTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index fa47e29b555d7..1a7b5bc3ee2a1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -120,7 +120,7 @@ public void apply(Project project) { registerDistributionInputs(task, integTestDistro); // Enable parallel execution for these tests since each test gets its own cluster - // task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); + task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); // Disable test failure reporting since this stuff is now captured in build scans task.getExtensions().getExtraProperties().set("dumpOutputOnFailure", false); From 21d0ed84ff247742d48086c9a8a0b738ea0b72c9 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 15:04:09 -0800 Subject: [PATCH 08/38] Reset upgrade failure status after test class --- .../upgrades/ParameterizedFullClusterRestartTestCase.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index 29e1ae3e92255..9d3b8e08c52d8 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ 
-15,6 +15,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; import org.junit.Before; import java.util.Arrays; @@ -60,6 +61,11 @@ public void maybeUpgrade() throws Exception { assumeFalse("Cluster upgrade failed", upgradeFailed); } + @AfterClass + public static void resetUpgrade() { + upgradeFailed = false; + } + public boolean isRunningAgainstOldCluster() { return requestedUpgradeStatus == OLD; } From ae0f99c08eb7b4514e23904f8c3728c16ff6d7de Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 08:14:03 -0800 Subject: [PATCH 09/38] Fix test --- .../elasticsearch/xpack/restart/WatcherMappingUpdateIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index e6f8a89f08c1b..8eb16e47fbdbb 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningFailureException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; @@ -97,7 +98,7 @@ private void assertNoMappingVersion(String index) throws Exception { private RequestOptions.Builder getWarningHandlerOptions(String index) { return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(w -> 
w.contains(getWatcherSystemIndexWarning(index)) == false || w.size() != 1); + .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false); } private String getWatcherSystemIndexWarning(String index) { From c271e6e2251a1660cba281910d3ce980443017a1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 08:19:48 -0800 Subject: [PATCH 10/38] Remove unused import --- .../org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index 8eb16e47fbdbb..b096356c1c3dd 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.WarningFailureException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; From aa8a3bc7d2d09336a35006378f3756165eaa92fb Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 14:28:25 -0800 Subject: [PATCH 11/38] Fixes --- .../elasticsearch/test/cluster/local/LocalClusterFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 49356f28d6ea6..2bd2360b4bb18 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -109,7 +109,6 @@ public synchronized void start(Version version) { distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); initializeWorkingDirectory(currentVersion != null); - initializeWorkingDirectory(); copyExtraJarFiles(); installPlugins(); if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { @@ -370,6 +369,7 @@ private void addKeystoreFiles() { file.writeTo(path); ProcessUtils.exec( + spec.getKeystorePassword(), workingDir, OS.conditional( c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) From afc66a8f97aa54cff94178aafcd39134367e653b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 15:40:49 -0800 Subject: [PATCH 12/38] Warning might not always happen --- .../xpack/restart/FullClusterRestartIT.java | 41 ++++++++----------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index b15f6a08ef2c4..ef862739b2ee0 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -109,12 +109,7 @@ public void testSecurityNativeRealm() throws Exception { } else { waitForYellow(".security"); final Request getSettingsRequest = new Request("GET", "/.security/_settings/index.format"); - getSettingsRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - 
+ "version, direct access to system indices will be prevented by default" - ) - ); + getSettingsRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); Response settingsResponse = client().performRequest(getSettingsRequest); Map settingsResponseMap = entityAsMap(settingsResponse); logger.info("settings response map {}", settingsResponseMap); @@ -396,12 +391,7 @@ public void testApiKeySuperuser() throws IOException { "doc_type": "foo" }"""); if (getOldClusterVersion().onOrAfter(Version.V_7_10_0)) { - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); } else { indexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", apiKeyAuthHeader)); } @@ -415,12 +405,7 @@ public void testApiKeySuperuser() throws IOException { // read is ok final Request searchRequest = new Request("GET", ".security/_search"); - searchRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + searchRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); assertOK(client().performRequest(searchRequest)); // write must not be allowed @@ -429,12 +414,7 @@ public void testApiKeySuperuser() throws IOException { { "doc_type": "foo" }"""); - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", 
apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(indexRequest)); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); assertThat(e.getMessage(), containsString("is unauthorized")); @@ -1001,4 +981,17 @@ private static void createComposableTemplate(RestClient client, String templateN createIndexTemplateRequest.setEntity(templateJSON); client.performRequest(createIndexTemplateRequest); } + + private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { + return RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler( + w -> w.size() > 0 + && w.contains( + "this request accesses system indices: [" + + index + + "], but in a future major " + + "version, direct access to system indices will be prevented by default" + ) == false + ); + } } From 8d4bd49c3f76b64332091330322e90ca29f42b61 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 26 Jan 2023 14:30:20 -0800 Subject: [PATCH 13/38] Fix compilation error --- .../org/elasticsearch/test/cluster/local/LocalClusterSpec.java | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 830e00b401f67..2234b037381a8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -265,6 +265,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider) { n.distributionType, n.features, n.keystoreSettings, + n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, n.systemProperties From b8d6eaab2e64b00c8ac7afa3beffe72399db81b9 Mon Sep 17 00:00:00 2001 From: Mark 
Vieira Date: Thu, 26 Jan 2023 15:38:49 -0800 Subject: [PATCH 14/38] Avoid unnecessary restarts when testing current version --- .../upgrades/ParameterizedFullClusterRestartTestCase.java | 6 +++++- .../elasticsearch/test/cluster/ElasticsearchCluster.java | 3 --- .../test/cluster/local/LocalElasticsearchCluster.java | 6 ------ 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index 9d3b8e08c52d8..a4694e1261170 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -29,6 +29,7 @@ public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTest private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); private static boolean upgradeFailed = false; + private static boolean upgraded = false; private final FullClustRestartUpgradeStatus requestedUpgradeStatus; public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { @@ -42,7 +43,7 @@ public static Iterable parameters() throws Exception { @Before public void maybeUpgrade() throws Exception { - if (getUpgradeCluster().getVersion().equals(OLD_CLUSTER_VERSION) && requestedUpgradeStatus == UPGRADED) { + if (upgraded == false && requestedUpgradeStatus == UPGRADED) { try { if (OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { // First upgrade to latest wire compatible version @@ -54,6 +55,8 @@ public void maybeUpgrade() throws Exception { } catch 
(Exception e) { upgradeFailed = true; throw e; + } finally { + upgraded = true; } } @@ -63,6 +66,7 @@ public void maybeUpgrade() throws Exception { @AfterClass public static void resetUpgrade() { + upgraded = false; upgradeFailed = false; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java index 3bc4efaeb032f..02eb3fb73df63 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java @@ -10,7 +10,6 @@ import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; import org.elasticsearch.test.cluster.local.LocalClusterSpecBuilder; -import org.elasticsearch.test.cluster.util.Version; import org.junit.rules.TestRule; /** @@ -33,6 +32,4 @@ static LocalClusterSpecBuilder local() { return new DefaultLocalClusterSpecBuilder(); } - Version getVersion(); - } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index dc532dfd956bb..9a5e5666f5e9a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -113,12 +113,6 @@ public void upgradeToVersion(Version version) { handle.upgradeToVersion(version); } - @Override - public Version getVersion() { - checkHandle(); - return spec.getNodes().get(0).getVersion(); - } - private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. 
Did you forget the @ClassRule annotation?"); From a14906c42e4d05e22e0dbdfd55d51d206f97d4dc Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:47:45 -0800 Subject: [PATCH 15/38] Review feedback --- .../elasticsearch/upgrades/FullClusterRestartIT.java | 2 +- .../upgrades/FullClusterRestartTestOrdering.java | 2 +- ...tatus.java => FullClusterRestartUpgradeStatus.java} | 2 +- .../ParameterizedFullClusterRestartTestCase.java | 10 +++++----- .../org/elasticsearch/upgrades/QueryBuilderBWCIT.java | 2 +- .../test/cluster/local/LocalClusterFactory.java | 8 ++++---- .../xpack/restart/FullClusterRestartIT.java | 4 ++-- .../AbstractXpackFullClusterRestartTestCase.java | 4 ++-- .../xpack/restart/CoreFullClusterRestartIT.java | 4 ++-- .../xpack/restart/FullClusterRestartIT.java | 4 ++-- .../restart/MLModelDeploymentFullClusterRestartIT.java | 4 ++-- .../MlConfigIndexMappingsFullClusterRestartIT.java | 4 ++-- .../restart/MlHiddenIndicesFullClusterRestartIT.java | 4 ++-- .../xpack/restart/MlMigrationFullClusterRestartIT.java | 4 ++-- .../elasticsearch/xpack/restart/QueryBuilderBWCIT.java | 4 ++-- .../xpack/restart/WatcherMappingUpdateIT.java | 4 ++-- 16 files changed, 33 insertions(+), 33 deletions(-) rename qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/{FullClustRestartUpgradeStatus.java => FullClusterRestartUpgradeStatus.java} (90%) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index e250a945aa903..3f9a007e6bf4e 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -112,7 +112,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas private String index; - public 
FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java index 9f5c57346b945..232619ee93bb9 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -19,6 +19,6 @@ public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { } private int getOrdinal(TestMethodAndParams t) { - return ((FullClustRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + return ((FullClusterRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java similarity index 90% rename from qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java index dda196ddafc20..06048d020e2a0 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java @@ -8,7 +8,7 @@ package org.elasticsearch.upgrades; -public enum FullClustRestartUpgradeStatus { +public enum FullClusterRestartUpgradeStatus { OLD, UPGRADED } diff --git 
a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index a4694e1261170..a064c87743800 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -21,8 +21,8 @@ import java.util.Arrays; import java.util.Locale; -import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.OLD; -import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.UPGRADED; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; @TestCaseOrdering(FullClusterRestartTestOrdering.class) public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { @@ -30,15 +30,15 @@ public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTest private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); private static boolean upgradeFailed = false; private static boolean upgraded = false; - private final FullClustRestartUpgradeStatus requestedUpgradeStatus; + private final FullClusterRestartUpgradeStatus requestedUpgradeStatus; - public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { this.requestedUpgradeStatus = upgradeStatus; } @ParametersFactory public static Iterable parameters() throws Exception { - return Arrays.stream(FullClustRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + return 
Arrays.stream(FullClusterRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); } @Before diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 91607dec6f721..1636644409fc7 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -79,7 +79,7 @@ protected ElasticsearchCluster getUpgradeCluster() { return cluster; } - public QueryBuilderBWCIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public QueryBuilderBWCIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 0d7340166a2a0..34e1f7285c6b0 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -213,12 +213,12 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory(boolean preserverWorkingDirectory) { + private void initializeWorkingDirectory(boolean preserveWorkingDirectory) { try { - if (preserverWorkingDirectory == false) { - IOUtils.deleteWithRetry(workingDir); - } else { + if (preserveWorkingDirectory) { IOUtils.deleteWithRetry(distributionDir); + } else { + IOUtils.deleteWithRetry(workingDir); } try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java 
b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index e26e919913f6e..07ed594770649 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -65,7 +65,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas .feature(FeatureFlag.TIME_SERIES_MODE) .build(); - public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java index 10486c914d470..0bc9101301a54 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import 
org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.junit.ClassRule; @@ -38,7 +38,7 @@ public abstract class AbstractXpackFullClusterRestartTestCase extends Parameteri .feature(FeatureFlag.TIME_SERIES_MODE) .build(); - public AbstractXpackFullClusterRestartTestCase(FullClustRestartUpgradeStatus upgradeStatus) { + public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java index cc202cd71569d..65a8b0b475f29 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.FullClusterRestartIT; import java.nio.charset.StandardCharsets; @@ -33,7 +33,7 @@ public class CoreFullClusterRestartIT extends FullClusterRestartIT { .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); } - public CoreFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public 
CoreFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index ef862739b2ee0..ab48825ed983a 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -68,7 +68,7 @@ public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCas public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; - public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 00e253c43cbc8..5a9e28274b84e 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -65,7 +65,7 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClus RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } - public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index da3a00574f4a3..e4ab3957f2627 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import 
org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -36,7 +36,7 @@ public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFull private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; - public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index 86d3e2239a36e..aeb3dad547946 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -51,7 +51,7 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullCluste Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); - public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public 
MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 726e3ab559818..2bbda9123ae34 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -44,7 +44,7 @@ public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRes private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; - public MlMigrationFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MlMigrationFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java 
index 5a2268626864e..563cde322b725 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -30,7 +30,7 @@ public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBW .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); } - public QueryBuilderBWCIT(FullClustRestartUpgradeStatus upgradeStatus) { + public QueryBuilderBWCIT(FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index b096356c1c3dd..043b3f49a8825 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -27,7 +27,7 @@ public class WatcherMappingUpdateIT extends 
AbstractXpackFullClusterRestartTestCase { - public WatcherMappingUpdateIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } From 7669d79cabc92b620658dc72da58e7219323dafa Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:50:45 -0800 Subject: [PATCH 16/38] Always call deletePortsFiles when stopping a node --- .../elasticsearch/test/cluster/local/LocalClusterFactory.java | 1 + .../elasticsearch/test/cluster/local/LocalClusterHandle.java | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 34e1f7285c6b0..24cebbeb7168f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -138,6 +138,7 @@ public synchronized void stop(boolean forcibly) { ProcessUtils.stopHandle(process.toHandle(), forcibly); ProcessReaper.instance().unregister(getServiceName()); } + deletePortsFiles(); } public void waitForExit() { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 5d851d61f49e8..6ad2709957299 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -75,7 +75,6 @@ public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); execute(() -> nodes.parallelStream().forEach(n -> 
n.stop(forcibly))); - execute(() -> nodes.parallelStream().forEach(Node::deletePortsFiles)); } else { // Make sure the process is stopped, otherwise wait execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); @@ -132,7 +131,6 @@ public void upgradeNodeToVersion(int index, Version version) { Node node = nodes.get(index); node.stop(false); LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); - node.deletePortsFiles(); node.start(version); waitUntilReady(); } From 999fefedc3ff437c614e69dba74558f243f207e2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:57:57 -0800 Subject: [PATCH 17/38] Spotless --- .../elasticsearch/xpack/restart/CoreFullClusterRestartIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java index 65a8b0b475f29..dcdc127079637 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.FullClusterRestartIT; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; From b12398e230b02051e719d25e7105e9ec0252daa3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 30 Jan 2023 18:24:55 +0000 Subject: [PATCH 18/38] Fix some possible NPEs in strange JVM configs (#93352) `JvmErgonomics` requires 
various JVM options to be present, but if they are omitted then we throw a `NullPointerException` which looks to the user like an ES bug. They would have to be doing something a little odd to get into this state, but nonetheless it is possible to hit these NPEs. We don't need to handle such a config gracefully, but we should clarify why Elasticsearch won't start to help the user fix their config. --- .../server/cli/JvmErgonomics.java | 42 ++++++++++++------ .../elasticsearch/server/cli/JvmOption.java | 7 +++ .../server/cli/JvmErgonomicsTests.java | 43 +++++++++++++++++++ 3 files changed, 78 insertions(+), 14 deletions(-) diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java index 46e3da3ced90b..926d5727a1b4a 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java @@ -64,28 +64,42 @@ static boolean tuneG1GCForSmallHeap(final long heapSize) { } static boolean tuneG1GCHeapRegion(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GCHeapRegion = finalJvmOptions.get("G1HeapRegionSize"); - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - return (tuneG1GCForSmallHeap && g1GC.getMandatoryValue().equals("true") && g1GCHeapRegion.isCommandLineOrigin() == false); + return tuneG1GCForSmallHeap && usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1HeapRegionSize"); } static int tuneG1GCReservePercent(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCReservePercent = finalJvmOptions.get("G1ReservePercent"); - if (g1GC.getMandatoryValue().equals("true")) { - if (g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap) { - return 15; - } else if 
(g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap == false) { - return 25; - } + if (usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1ReservePercent")) { + return tuneG1GCForSmallHeap ? 15 : 25; } return 0; } static boolean tuneG1GCInitiatingHeapOccupancyPercent(final Map finalJvmOptions) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCInitiatingHeapOccupancyPercent = finalJvmOptions.get("InitiatingHeapOccupancyPercent"); - return g1GCInitiatingHeapOccupancyPercent.isCommandLineOrigin() == false && g1GC.getMandatoryValue().equals("true"); + return usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "InitiatingHeapOccupancyPercent"); + } + + /** + * @return
    + *
  • {@code true} if `-XX:+UseG1GC` is in the final JVM options and {@code optionName} was not specified. + *
  • {@code false} if either `-XX:-UseG1GC` is in the final JVM options, or {@code optionName} was specified. + *
+ * + * @throws IllegalStateException if neither `-XX:+UseG1GC` nor `-XX:-UseG1GC` is in the final JVM options, or `-XX:+UseG1GC` is selected + * and {@code optionName} is not in the final JVM options. + */ + private static boolean usingG1GcWithoutCommandLineOriginOption(Map finalJvmOptions, String optionName) { + return getRequiredOption(finalJvmOptions, "UseG1GC").getMandatoryValue().equals("true") + && getRequiredOption(finalJvmOptions, optionName).isCommandLineOrigin() == false; + } + + private static JvmOption getRequiredOption(final Map finalJvmOptions, final String key) { + final var jvmOption = finalJvmOptions.get(key); + if (jvmOption == null) { + throw new IllegalStateException( + "JVM option [" + key + "] was unexpectedly missing. Elasticsearch requires this option to be present." + ); + } + return jvmOption; } private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?[\\w+].*?)=(?.*)$"); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java index 39bf2e54dade0..60cbcb86c02b9 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java @@ -8,6 +8,8 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.Strings; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -29,6 +31,11 @@ class JvmOption { private final String origin; JvmOption(String value, String origin) { + if (origin == null) { + throw new IllegalStateException(Strings.format(""" + Elasticsearch could not determine the origin of JVM option [%s]. 
\ + This indicates that it is running in an unsupported configuration.""", value)); + } this.value = value; this.origin = origin; } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java index f68a51de85c2a..0d4edfc384d46 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -179,4 +180,46 @@ public void testMaxDirectMemorySizeChoiceWhenSet() throws Exception { ); } + @SuppressWarnings("ConstantConditions") + public void testMissingOptionHandling() { + final Map g1GcOn = Map.of("UseG1GC", new JvmOption("true", "")); + final Map g1GcOff = Map.of("UseG1GC", new JvmOption("", "")); + + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(Map.of(), false)); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(Map.of(), true)).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(g1GcOn, true)).getMessage(), + allOf(containsString("[G1HeapRegionSize]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(Map.of(), randomBoolean())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + 
expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(g1GcOn, randomBoolean())).getMessage(), + allOf(containsString("[G1ReservePercent]"), containsString("unexpectedly missing")) + ); + assertEquals(0, JvmErgonomics.tuneG1GCReservePercent(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(Map.of())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOn)).getMessage(), + allOf(containsString("[InitiatingHeapOccupancyPercent]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOff)); + + assertThat( + expectThrows(IllegalStateException.class, () -> new JvmOption("OptionName", null)).getMessage(), + allOf(containsString("could not determine the origin of JVM option [OptionName]"), containsString("unsupported")) + ); + } + } From b0cc6422bfefa0e39d7fd09e20258e4f6ef95e35 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 30 Jan 2023 13:07:48 -0600 Subject: [PATCH 19/38] Download the geoip databases only when needed (#92335) This commit changes the geoip downloader so that we only download the geoip databases if you have at least one geoip processor in your cluster, or when you add a new geoip processor (or if `ingest.geoip.downloader.eager.download` is explicitly set to true). 
--- docs/changelog/92335.yaml | 6 + .../ingest/processors/geoip.asciidoc | 12 +- .../ingest/geoip/GeoIpDownloaderIT.java | 138 +++++++++- .../ingest/geoip/GeoIpDownloaderStatsIT.java | 32 ++- .../ingest/geoip/GeoIpDownloaderTaskIT.java | 2 +- .../ingest/geoip/GeoIpDownloader.java | 77 ++++-- .../geoip/GeoIpDownloaderTaskExecutor.java | 127 ++++++++- .../ingest/geoip/IngestGeoIpPlugin.java | 6 +- .../GeoIpDownloaderTaskExecutorTests.java | 253 ++++++++++++++++++ .../ingest/geoip/GeoIpDownloaderTests.java | 47 +++- .../IngestGeoIpClientYamlTestSuiteIT.java | 40 +++ .../upgrades/GeoIpUpgradeIT.java | 4 +- 12 files changed, 684 insertions(+), 60 deletions(-) create mode 100644 docs/changelog/92335.yaml create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java diff --git a/docs/changelog/92335.yaml b/docs/changelog/92335.yaml new file mode 100644 index 0000000000000..9dc21fdcdc511 --- /dev/null +++ b/docs/changelog/92335.yaml @@ -0,0 +1,6 @@ +pr: 92335 +summary: Download the geoip databases only when needed +area: Ingest Node +type: bug +issues: + - 90673 diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index fa31a3bbe2543..d39f3be82d2b0 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -11,7 +11,10 @@ IPv4 or IPv6 address. By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 ASN GeoIP2 databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the -CC BY-SA 4.0 license. {es} automatically downloads updates for +CC BY-SA 4.0 license. It automatically downloads these databases if either +`ingest.geoip.downloader.eager.download` is set to true, or your cluster +has at least one pipeline with a `geoip` processor. 
{es} +automatically downloads updates for these databases from the Elastic GeoIP endpoint: https://geoip.elastic.co/v1/database. To get download statistics for these updates, use the <>. @@ -412,6 +415,13 @@ If `true`, {es} automatically downloads and manages updates for GeoIP2 databases from the `ingest.geoip.downloader.endpoint`. If `false`, {es} does not download updates and deletes all downloaded databases. Defaults to `true`. +[[ingest-geoip-downloader-eager-download]] +(<>, Boolean) +If `true`, {es} downloads GeoIP2 databases immediately, regardless of whether a +pipeline exists with a geoip processor. If `false`, {es} only begins downloading +the databases if a pipeline with a geoip processor exists or is added. Defaults +to `false`. + [[ingest-geoip-downloader-endpoint]] `ingest.geoip.downloader.endpoint`:: (<>, string) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 0e164cab818b2..f3f37f50147fb 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -27,10 +27,13 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStatsAction; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import 
org.elasticsearch.search.SearchHit; @@ -51,11 +54,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -82,7 +87,12 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, IngestGeoIpPlugin.class, GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class); + return Arrays.asList( + ReindexPlugin.class, + IngestGeoIpPlugin.class, + GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class, + NonGeoProcessorsPlugin.class + ); } @Override @@ -104,7 +114,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + .putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get(); @@ -149,6 +159,7 @@ public void cleanUp() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/75221") public void testInvalidTimestamp() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -160,7 +171,7 @@ public void testInvalidTimestamp() throws Exception { assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); }, 2, TimeUnit.MINUTES); - putPipeline(); + putGeoIpPipeline(); verifyUpdatedDatabase(); settingsResponse = 
client().admin() @@ -172,7 +183,9 @@ public void testInvalidTimestamp() throws Exception { settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); List geoIpTmpDirs = getGeoIpTmpDirs(); @@ -186,7 +199,7 @@ public void testInvalidTimestamp() throws Exception { } } }); - putPipeline(); + putGeoIpPipeline(); assertBusy(() -> { SimulateDocumentBaseResult result = simulatePipeline(); assertThat(result.getFailure(), nullValue()); @@ -221,7 +234,9 @@ public void testUpdatedTimestamp() throws Exception { ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); assertBusy(() -> assertNotEquals(lastCheck, getGeoIpTaskState().getDatabases().get("GeoLite2-ASN.mmdb").lastCheck())); @@ -229,6 +244,7 @@ public void testUpdatedTimestamp() throws Exception { } public void testGeoIpDatabasesDownload() throws Exception { + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -283,12 +299,34 @@ public void testGeoIpDatabasesDownload() throws Exception { } } + public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { + assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + String pipelineId = randomAlphaOfLength(10); + putGeoIpPipeline(pipelineId); + 
ClusterUpdateSettingsResponse settingsResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)) + .get(); + assertTrue(settingsResponse.isAcknowledged()); + assertBusy(() -> { assertNull(getTask().getState()); }); + putNonGeoipPipeline(pipelineId); + assertBusy(() -> { assertNull(getTask().getState()); }); + putNonGeoipPipeline(pipelineId); + assertNull(getTask().getState()); + putGeoIpPipeline(); + assertBusy(() -> { + GeoIpTaskState state = getGeoIpTaskState(); + assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + }, 2, TimeUnit.MINUTES); + } + @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/69972") public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); setupDatabasesInConfigDirectory(); // setup: - putPipeline(); + putGeoIpPipeline(); // verify before updating dbs { @@ -355,7 +393,7 @@ public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/79074") public void testStartWithNoDatabases() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); - putPipeline(); + putGeoIpPipeline(); // Behaviour without any databases loaded: { @@ -438,7 +476,21 @@ private SimulateDocumentBaseResult simulatePipeline() throws IOException { return (SimulateDocumentBaseResult) simulateResponse.getResults().get(0); } - private void putPipeline() throws IOException { + /** + * This creates a pipeline with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is enabled). 
+ * @throws IOException + */ + private void putGeoIpPipeline() throws IOException { + putGeoIpPipeline("_id"); + } + + /** + * This creates a pipeline named pipelineId with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is + * enabled). + * @param pipelineId The name of the new pipeline with a geoip processor + * @throws IOException + */ + private void putGeoIpPipeline(String pipelineId) throws IOException { BytesReference bytes; try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); @@ -484,7 +536,45 @@ private void putPipeline() throws IOException { builder.endObject(); bytes = BytesReference.bytes(builder); } - assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); + } + + /** + * This creates a pipeline named pipelineId that does _not_ have a geoip processor. + * @throws IOException + */ + private void putNonGeoipPipeline(String pipelineId) throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); } private List getGeoIpTmpDirs() throws IOException { @@ -624,4 +714,32 @@ public int 
read(byte[] b, int off, int len) throws IOException { return read; } } + + /** + * This class defines a processor of type "test". + */ + public static final class NonGeoProcessorsPlugin extends Plugin implements IngestPlugin { + public static final String NON_GEO_PROCESSOR_TYPE = "test"; + + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map procMap = new HashMap<>(); + procMap.put(NON_GEO_PROCESSOR_TYPE, (factories, tag, description, config) -> new AbstractProcessor(tag, description) { + @Override + public void execute(IngestDocument ingestDocument, BiConsumer handler) {} + + @Override + public String getType() { + return NON_GEO_PROCESSOR_TYPE; + } + + @Override + public boolean isAsync() { + return false; + } + + }); + return procMap; + } + } } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java index 6076063a38b5c..eea763351dd09 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java @@ -20,6 +20,8 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; import java.io.IOException; @@ -29,6 +31,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +83,7 @@ public void testStats() throws Exception { 
assertThat(jsonMapView.get("stats.databases_count"), equalTo(0)); assertThat(jsonMapView.get("stats.total_download_time"), equalTo(0)); assertEquals(0, jsonMapView.>get("nodes").size()); - + putPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -108,6 +111,33 @@ public void testStats() throws Exception { }); } + private void putPipeline() throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + } + public static Map convertToMap(ToXContent part) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); part.toXContent(builder, EMPTY_PARAMS); diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java index 60be668272b2c..83fde48b39f3d 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java @@ -48,7 +48,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + 
.putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get() diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 6776ab9d629a2..0732674632b34 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -48,6 +47,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; /** * Main component responsible for downloading new GeoIP databases. 
@@ -59,14 +59,6 @@ public class GeoIpDownloader extends AllocatedPersistentTask { private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); - public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( - "ingest.geoip.downloader.poll.interval", - TimeValue.timeValueDays(3), - TimeValue.timeValueDays(1), - Property.Dynamic, - Property.NodeScope - ); - // for overriding in tests private static final String DEFAULT_ENDPOINT = System.getProperty( "ingest.geoip.downloader.endpoint.default", @@ -91,9 +83,16 @@ public class GeoIpDownloader extends AllocatedPersistentTask { // visible for testing protected volatile GeoIpTaskState state; - private volatile TimeValue pollInterval; private volatile Scheduler.ScheduledCancellable scheduled; private volatile GeoIpDownloaderStats stats = GeoIpDownloaderStats.EMPTY; + private final Supplier pollIntervalSupplier; + private final Supplier eagerDownloadSupplier; + /* + * This variable tells us whether we have at least one pipeline with a geoip processor. If there are no geoip processors then we do + * not download geoip databases (unless configured to eagerly download). Access is not protected because it is set in the constructor + * and then only ever updated on the cluster state update thread (it is also read on the generic thread). Non-private for unit testing. 
+ */ + private final Supplier atLeastOneGeoipProcessorSupplier; GeoIpDownloader( Client client, @@ -106,7 +105,10 @@ public class GeoIpDownloader extends AllocatedPersistentTask { String action, String description, TaskId parentTask, - Map headers + Map headers, + Supplier pollIntervalSupplier, + Supplier eagerDownloadSupplier, + Supplier atLeastOneGeoipProcessorSupplier ) { super(id, type, action, description, parentTask, headers); this.httpClient = httpClient; @@ -114,15 +116,9 @@ public class GeoIpDownloader extends AllocatedPersistentTask { this.clusterService = clusterService; this.threadPool = threadPool; endpoint = ENDPOINT_SETTING.get(settings); - pollInterval = POLL_INTERVAL_SETTING.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setPollInterval); - } - - public void setPollInterval(TimeValue pollInterval) { - this.pollInterval = pollInterval; - if (scheduled != null && scheduled.cancel()) { - scheduleNextRun(TimeValue.ZERO); - } + this.pollIntervalSupplier = pollIntervalSupplier; + this.eagerDownloadSupplier = eagerDownloadSupplier; + this.atLeastOneGeoipProcessorSupplier = atLeastOneGeoipProcessorSupplier; } // visible for testing @@ -130,6 +126,7 @@ void updateDatabases() throws IOException { var clusterState = clusterService.state(); var geoipIndex = clusterState.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX); if (geoipIndex != null) { + logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX); if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) { throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active"); } @@ -138,13 +135,18 @@ void updateDatabases() throws IOException { throw blockException; } } - - logger.debug("updating geoip databases"); - List> response = fetchDatabasesOverview(); - for (Map res : response) { - if 
(res.get("name").toString().endsWith(".tgz")) { - processDatabase(res); + if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) { + logger.trace("Updating geoip databases"); + List> response = fetchDatabasesOverview(); + for (Map res : response) { + if (res.get("name").toString().endsWith(".tgz")) { + processDatabase(res); + } } + } else { + logger.trace( + "Not updating geoip databases because no geoip processors exist in the cluster and eager downloading is not configured" + ); } } @@ -186,7 +188,7 @@ void processDatabase(Map databaseInfo) { } } catch (Exception e) { stats = stats.failedDownload(); - logger.error((Supplier) () -> "error downloading geoip database [" + name + "]", e); + logger.error((org.apache.logging.log4j.util.Supplier) () -> "error downloading geoip database [" + name + "]", e); } } @@ -266,6 +268,9 @@ void setState(GeoIpTaskState state) { this.state = state; } + /** + * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval. + */ void runDownloader() { if (isCancelled() || isCompleted()) { return; @@ -281,7 +286,22 @@ void runDownloader() { } catch (Exception e) { logger.error("exception during geoip databases cleanup", e); } - scheduleNextRun(pollInterval); + scheduleNextRun(pollIntervalSupplier.get()); + } + + /** + * This method requests that the downloader be rescheduled to run immediately (presumably because a dynamic property supplied by + * pollIntervalSupplier or eagerDownloadSupplier has changed, or a pipeline with a geoip processor has been added). This method does + * nothing if this task is cancelled, completed, or has not yet been scheduled to run for the first time. It cancels any existing + * scheduled run. 
+ */ + public void requestReschedule() { + if (isCancelled() || isCompleted()) { + return; + } + if (scheduled != null && scheduled.cancel()) { + scheduleNextRun(TimeValue.ZERO); + } } private void cleanDatabases() { @@ -321,4 +341,5 @@ private void scheduleNextRun(TimeValue time) { scheduled = threadPool.schedule(this::runDownloader, time, ThreadPool.Names.GENERIC); } } + } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index c56fd9c2d0c53..7457738b75301 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -17,15 +17,20 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -35,7 +40,10 @@ import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.RemoteTransportException; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; @@ -56,6 +64,20 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor POLL_INTERVAL_SETTING = Setting.timeSetting( + "ingest.geoip.downloader.poll.interval", + TimeValue.timeValueDays(3), + TimeValue.timeValueDays(1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting EAGER_DOWNLOAD_SETTING = Setting.boolSetting( + "ingest.geoip.downloader.eager.download", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); @@ -66,6 +88,10 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor currentTask = new AtomicReference<>(); + private volatile TimeValue pollInterval; + private volatile boolean eagerDownload; + private volatile boolean atLeastOneGeoipProcessor; + private final AtomicBoolean taskIsBootstrapped = new AtomicBoolean(false); GeoIpDownloaderTaskExecutor(Client client, HttpClient httpClient, ClusterService clusterService, ThreadPool threadPool) { super(GEOIP_DOWNLOADER, ThreadPool.Names.GENERIC); @@ -75,9 +101,18 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor pollInterval, + () -> eagerDownload, + () -> atLeastOneGeoipProcessor ); } @@ -140,12 +198,65 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - clusterService.removeListener(this); - if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { - startTask(() -> clusterService.addListener(this)); - } else { - stopTask(() -> clusterService.addListener(this)); + if (taskIsBootstrapped.getAndSet(true) == false) { + this.atLeastOneGeoipProcessor = 
hasAtLeastOneGeoipProcessor(event.state()); + if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { + startTask(() -> taskIsBootstrapped.set(false)); + } else { + stopTask(() -> taskIsBootstrapped.set(false)); + } } + + if (event.metadataChanged() && event.changedCustomMetadataSet().contains(IngestMetadata.TYPE)) { + boolean newAtLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state()); + if (newAtLeastOneGeoipProcessor && atLeastOneGeoipProcessor == false) { + atLeastOneGeoipProcessor = true; + logger.trace("Scheduling runDownloader because a geoip processor has been added"); + GeoIpDownloader currentDownloader = getCurrentTask(); + if (currentDownloader != null) { + currentDownloader.requestReschedule(); + } + } else { + atLeastOneGeoipProcessor = newAtLeastOneGeoipProcessor; + } + } + } + + @SuppressWarnings("unchecked") + static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { + List pipelineDefinitions = IngestService.getPipelines(clusterState); + return pipelineDefinitions.stream().anyMatch(pipelineDefinition -> { + Map pipelineMap = pipelineDefinition.getConfigAsMap(); + return hasAtLeastOneGeoipProcessor((List>) pipelineMap.get(Pipeline.PROCESSORS_KEY)); + }); + } + + private static boolean hasAtLeastOneGeoipProcessor(List> processors) { + return processors != null && processors.stream().anyMatch(GeoIpDownloaderTaskExecutor::hasAtLeastOneGeoipProcessor); + } + + private static boolean hasAtLeastOneGeoipProcessor(Map processor) { + return processor != null + && (processor.containsKey(GeoIpProcessor.TYPE) + || isProcessorWithOnFailureGeoIpProcessor(processor) + || isForeachProcessorWithGeoipProcessor(processor)); + } + + @SuppressWarnings("unchecked") + private static boolean isProcessorWithOnFailureGeoIpProcessor(Map processor) { + return processor != null + && processor.values() + .stream() + .anyMatch( + value -> value instanceof Map + && hasAtLeastOneGeoipProcessor(((Map>>) value).get("on_failure")) + ); + 
} + + @SuppressWarnings("unchecked") + private static boolean isForeachProcessorWithGeoipProcessor(Map processor) { + return processor.containsKey("foreach") + && hasAtLeastOneGeoipProcessor(((Map>) processor.get("foreach")).get("processor")); } private void startTask(Runnable onFailure) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index f69558171fd44..8aaf476b353ea 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -85,9 +85,10 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd public List> getSettings() { return Arrays.asList( CACHE_SIZE, + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING, GeoIpDownloader.ENDPOINT_SETTING, - GeoIpDownloader.POLL_INTERVAL_SETTING, - GeoIpDownloaderTaskExecutor.ENABLED_SETTING + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING ); } @@ -126,6 +127,7 @@ public Collection createComponents( } geoIpDownloaderTaskExecutor = new GeoIpDownloaderTaskExecutor(client, new HttpClient(), clusterService, threadPool); + geoIpDownloaderTaskExecutor.init(); return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java new file mode 100644 index 0000000000000..5cbe205f5c9c7 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.ingest.IngestMetadata; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class GeoIpDownloaderTaskExecutorTests extends ESTestCase { + public void testHasAtLeastOneGeoipProcessor() { + Map configs = new HashMap<>(); + IngestMetadata ingestMetadata = new IngestMetadata(configs); + ClusterState clusterState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + when(metadata.custom(IngestMetadata.TYPE)).thenReturn(ingestMetadata); + when(clusterState.getMetadata()).thenReturn(metadata); + List expectHitsInputs = getPipelinesWithGeoIpProcessors(); + List expectMissesInputs = getPipelinesWithoutGeoIpProcessors(); + { + // Test that hasAtLeastOneGeoipProcessor returns true for any pipeline with a geoip processor: + for (String pipeline : expectHitsInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + // Test that hasAtLeastOneGeoipProcessor returns false for any pipeline without a geoip processor: + for (String pipeline : expectMissesInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + 
assertFalse(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + /* + * Now test that hasAtLeastOneGeoipProcessor returns true for a mix of pipelines, some which have geoip processors and some + * which do not: + */ + configs.clear(); + for (String pipeline : expectHitsInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + for (String pipeline : expectMissesInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + + /* + * This method returns an assorted list of pipelines that have geoip processors -- ones that ought to cause hasAtLeastOneGeoipProcessor + * to return true. + */ + private List getPipelinesWithGeoIpProcessors() { + String simpleGeoIpProcessor = """ + { + "processors":[ + { + "geoip":{ + "field":"provider" + } + } + ] + } + """; + String onFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + ] + } + """; + String foreachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + """; + String nestedForeachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + } + } + ] + } + """; + String nestedForeachWithOnFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", 
+ "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + } + } + } + } + ] + } + """; + String onFailureWithForeachWithGeoIp = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of( + simpleGeoIpProcessor, + onFailureWithGeoIpProcessor, + foreachWithGeoIpProcessor, + nestedForeachWithGeoIpProcessor, + nestedForeachWithOnFailureWithGeoIpProcessor, + onFailureWithForeachWithGeoIp + ); + } + + /* + * This method returns an assorted list of pipelines that _do not_ have geoip processors -- ones that ought to cause + * hasAtLeastOneGeoipProcessor to return false. + */ + private List getPipelinesWithoutGeoIpProcessors() { + String empty = """ + { + } + """; + String noProcessors = """ + { + "processors":[ + ] + } + """; + String onFailureWithForeachWithSet = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "set":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of(empty, noProcessors, onFailureWithForeachWithSet); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 16088deb86b3d..9f3334a07d8f3 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -53,6 +53,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -81,7 +82,12 
@@ public void setup() { when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, - Set.of(GeoIpDownloader.ENDPOINT_SETTING, GeoIpDownloader.POLL_INTERVAL_SETTING, GeoIpDownloaderTaskExecutor.ENABLED_SETTING) + Set.of( + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloader.ENDPOINT_SETTING, + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING + ) ) ); ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); @@ -98,7 +104,10 @@ public void setup() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ); } @@ -252,7 +261,10 @@ public void testProcessDatabaseNew() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -298,7 +310,10 @@ public void testProcessDatabaseUpdate() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -346,7 +361,10 @@ public void testProcessDatabaseSame() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -387,7 
+405,10 @@ public void testUpdateTaskState() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -414,7 +435,10 @@ public void testUpdateTaskStateError() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -440,6 +464,7 @@ public void testUpdateDatabases() throws IOException { builder.close(); when(httpClient.getBytes("a.b?elastic_geoip_service_tos=agree")).thenReturn(baos.toByteArray()); Iterator> it = maps.iterator(); + final AtomicBoolean atLeastOneGeoipProcessor = new AtomicBoolean(false); geoIpDownloader = new GeoIpDownloader( client, httpClient, @@ -451,7 +476,10 @@ public void testUpdateDatabases() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + atLeastOneGeoipProcessor::get ) { @Override void processDatabase(Map databaseInfo) { @@ -459,6 +487,9 @@ void processDatabase(Map databaseInfo) { } }; geoIpDownloader.updateDatabases(); + assertTrue(it.hasNext()); + atLeastOneGeoipProcessor.set(true); + geoIpDownloader.updateDatabases(); assertFalse(it.hasNext()); } diff --git a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java 
b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java index 5b40f4a6ada43..8584229ec171e 100644 --- a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java +++ b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java @@ -11,11 +11,17 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; import org.elasticsearch.client.Request; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -36,6 +42,7 @@ public static Iterable parameters() throws Exception { @Before public void waitForDatabases() throws Exception { + putGeoipPipeline(); assertBusy(() -> { Request request = new Request("GET", "/_ingest/geoip/stats"); Map response = entityAsMap(client().performRequest(request)); @@ -53,4 +60,37 @@ public void waitForDatabases() throws Exception { }); } + /** + * This creates a pipeline with a geoip processor so that the GeoipDownloader will download its databases. 
+ * @throws IOException + */ + private void putGeoipPipeline() throws IOException { + final BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + Request putPipelineRequest = new Request("PUT", "/_ingest/pipeline/pipeline-with-geoip"); + putPipelineRequest.setEntity(new ByteArrayEntity(bytes.array(), ContentType.APPLICATION_JSON)); + client().performRequest(putPipelineRequest); + } + } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java index 3dedd041d6465..eb0e97e1ecce1 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java @@ -26,7 +26,9 @@ public void testGeoIpDownloader() throws Exception { assertBusy(() -> { Response response = client().performRequest(new Request("GET", "_ingest/geoip/stats")); String tasks = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(tasks, Matchers.containsString("failed_downloads\":1")); + // The geoip downloader doesn't actually do anything since there are no geoip processors: + assertThat(tasks, Matchers.containsString("failed_downloads\":0")); + assertThat(tasks, Matchers.containsString("successful_downloads\":0")); }); } } From 9d03b143e0ac33158ccbd0c1cf2a7e749174c458 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 13:50:04 -0800 Subject: [PATCH 20/38] Add JDK 20 to 
Java support compatibility testing matrix --- .ci/matrix-runtime-javas.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index a6b2d4a15d848..07582c4892d52 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -10,3 +10,4 @@ ES_RUNTIME_JAVA: - openjdk17 - openjdk18 - openjdk19 + - openjdk20 From 180caf0dc8cb4d0af4a7df545f2c24de7505ca46 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 14:49:58 -0800 Subject: [PATCH 21/38] Bump bundled JDK to Java 19.0.2 (#93354) Closes https://github.com/elastic/elasticsearch/issues/93025 --- build-tools-internal/version.properties | 2 +- docs/changelog/93354.yaml | 6 ++++++ gradle/verification-metadata.xml | 26 ++++++++++++------------- 3 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/93354.yaml diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 8071a0dba037c..6130b599610a9 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 8.7.0 lucene = 9.5.0-snapshot-d19c3e2e0ed bundled_jdk_vendor = openjdk -bundled_jdk = 19.0.1+10@afdd2e245b014143b62ccb916125e3ce +bundled_jdk = 19.0.2+7@fdb695a9d9064ad6b064dc6df578380c # optional dependencies spatial4j = 0.7 diff --git a/docs/changelog/93354.yaml b/docs/changelog/93354.yaml new file mode 100644 index 0000000000000..2ad4d27a069cc --- /dev/null +++ b/docs/changelog/93354.yaml @@ -0,0 +1,6 @@ +pr: 93354 +summary: Bump bundled JDK to Java 19.0.2 +area: Packaging +type: upgrade +issues: + - 93025 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index b049f21a3bcfc..84dd73a6b0f52 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1662,25 +1662,25 @@ - - - + + + - - + + - - - + + + - - + + - - - + + + From de8eda45ebaabb3a8fb68c1242a3213ea13dcf86 Mon Sep 17 
00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 15:15:05 -0800 Subject: [PATCH 22/38] Make `--debug-server-jvm` work with new test framework (#93355) --- .../testclusters/StandaloneRestIntegTestTask.java | 1 + .../test/cluster/local/LocalClusterFactory.java | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index ab1436bb9a317..3754f57dc3788 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -71,6 +71,7 @@ public StandaloneRestIntegTestTask() { @Option(option = "debug-server-jvm", description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch.") public void setDebugServer(boolean enabled) { this.debugServer = enabled; + systemProperty("tests.cluster.debug.enabled", Boolean.toString(enabled)); } @Nested diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 24cebbeb7168f..e63f8236d58d4 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -59,6 +59,9 @@ public class LocalClusterFactory implements ClusterFactory getEnvironmentVariables() { .collect(Collectors.joining(" ")); } + String debugArgs = ""; + if (Boolean.getBoolean(TESTS_CLUSTER_DEBUG_ENABLED_SYSPROP)) { + int port = DEFAULT_DEBUG_PORT + spec.getCluster().getNodes().indexOf(spec); + debugArgs = ENABLE_DEBUG_JVM_ARGS + port; + } + String heapSize = 
System.getProperty("tests.heap.size", "512m"); environment.put("ES_JAVA_OPTS", "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " // Support passing in additional JVM arguments + System.getProperty("tests.jvm.argline", "") + " " + featureFlagProperties - + systemProperties); + + systemProperties + + debugArgs); return environment; } From c97e56e6f220c230f363b55a5673276abdf036a2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 15:52:44 -0800 Subject: [PATCH 23/38] Fix MapperSizeClientYamlTestSuiteIT when FIPS is enabled (#93357) The issue with this test failure is actually that we were silently failing to install the plugin under test into the cluster. The root cause here was the FIPS security policy file was not copied into cluster config directory before we attempting to run the plugin installer. Since we pass the FIPS JVM arguments to all CLI tools as well this caused plugin installation to fail. We now ensure that these files are copied before we attempt to run _any_ ES tools. 
Closes https://github.com/elastic/elasticsearch/issues/93303 --- .../test/mapper_size/10_basic.yml | 4 ---- .../cluster/local/LocalClusterFactory.java | 21 ++++++++++++------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml b/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml index d9b8dc0b01647..434368ed2f5b2 100644 --- a/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml +++ b/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml @@ -4,10 +4,6 @@ --- "Mapper Size": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/93303" - - do: indices.create: index: test diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index e63f8236d58d4..963566f52e8a9 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -112,22 +112,20 @@ public synchronized void start(Version version) { distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); initializeWorkingDirectory(currentVersion != null); + createConfigDirectory(); + copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install copyExtraJarFiles(); installPlugins(); if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { installModules(); } currentVersion = spec.getVersion(); + } else { + createConfigDirectory(); + copyExtraConfigFiles(); } - try { - IOUtils.deleteWithRetry(configDir); - Files.createDirectories(configDir); - } 
catch (IOException e) { - throw new UncheckedIOException("An error occurred creating config directory", e); - } writeConfiguration(); - copyExtraConfigFiles(); createKeystore(); addKeystoreSettings(); addKeystoreFiles(); @@ -209,6 +207,15 @@ public void waitUntilReady() { } } + private void createConfigDirectory() { + try { + IOUtils.deleteWithRetry(configDir); + Files.createDirectories(configDir); + } catch (IOException e) { + throw new UncheckedIOException("An error occurred creating config directory", e); + } + } + private List readPortsFile(Path file) { try (Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { return lines.map(String::trim).collect(Collectors.toList()); From c839c40de216bc1981d7aa6a09b412e53c671155 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 31 Jan 2023 07:02:44 +0000 Subject: [PATCH 24/38] Add ActionListener#run (#93338) It's pretty common to run a block of code in a `try ... catch` block that just passes exceptions off to a listener's `onFailure` method. This commit adds a small utility to encapsulate this, enabling some one-liners. 
--- .../test/rest/WaitForRefreshAndCloseIT.java | 7 +--- .../ingest/common/GrokProcessorGetAction.java | 21 +++++------ .../elasticsearch/action/ActionListener.java | 14 ++++--- .../action/SingleResultDeduplicator.java | 6 +-- .../TransportUpdateDesiredNodesAction.java | 8 ++-- ...TransportPrevalidateNodeRemovalAction.java | 8 ++-- .../state/TransportClusterStateAction.java | 10 ++--- .../action/bulk/TransportBulkAction.java | 10 ++--- .../action/support/ChannelActionListener.java | 6 +-- .../support/ListenableActionFuture.java | 10 ++--- .../action/support/TransportAction.java | 6 +-- .../broadcast/TransportBroadcastAction.java | 12 +----- .../node/TransportBroadcastByNodeAction.java | 6 +-- .../util/concurrent/ListenableFuture.java | 37 +++++++------------ .../snapshots/SnapshotShardsService.java | 8 ++-- .../xpack/ccr/action/ShardChangesAction.java | 6 +-- 16 files changed, 61 insertions(+), 114 deletions(-) diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index bdb0a76cf9709..19afb4932ff2c 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -10,6 +10,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -117,11 +118,7 @@ private ActionFuture start(Request request) { client().performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { - try { - 
future.onResponse(EntityUtils.toString(response.getEntity())); - } catch (IOException e) { - future.onFailure(e); - } + ActionListener.completeWith(future, () -> EntityUtils.toString(response.getEntity())); } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 5dcf944eaa2ad..ad00956d2dde7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -144,18 +144,15 @@ public TransportAction(TransportService transportService, ActionFilters actionFi @Override protected void doExecute(Task task, Request request, ActionListener listener) { - try { - listener.onResponse( - new Response( - request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) - ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns - : request.sorted() ? sortedEcsV1GrokPatterns - : ecsV1GrokPatterns - ) - ); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith( + listener, + () -> new Response( + request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) + ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns + : request.sorted() ? 
sortedEcsV1GrokPatterns + : ecsV1GrokPatterns + ) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 6974ddb127603..dd8b629f208c3 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -309,11 +309,7 @@ static ActionListener() { @Override public void onResponse(Response response) { - try { - delegate.onResponse(response); - } catch (Exception e) { - onFailure(e); - } + ActionListener.run(delegate, l -> l.onResponse(response)); } @Override @@ -566,4 +562,12 @@ public String toString() { } } + static > void run(L listener, CheckedConsumer action) { + try { + action.accept(listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java index 20db5fb6efca2..273c542bc825c 100644 --- a/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java +++ b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java @@ -89,10 +89,6 @@ public void onFailure(Exception e) { } }); }); - try { - executeAction.accept(wrappedListener); - } catch (Exception e) { - wrappedListener.onFailure(e); - } + ActionListener.run(wrappedListener, executeAction::accept); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index b0d7000afa8ac..19e5762b0a72f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ 
-79,9 +79,9 @@ protected void masterOperation( Task task, UpdateDesiredNodesRequest request, ClusterState state, - ActionListener listener + ActionListener responseListener ) throws Exception { - try { + ActionListener.run(responseListener, listener -> { settingsValidator.validate(request.getNodes()); clusterService.submitStateUpdateTask( "update-desired-nodes", @@ -89,9 +89,7 @@ protected void masterOperation( ClusterStateTaskConfig.build(Priority.URGENT, request.masterNodeTimeout()), taskExecutor ); - } catch (Exception e) { - listener.onFailure(e); - } + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java index 376abf0863410..f3ce39a0cdbf1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java @@ -83,14 +83,12 @@ protected void masterOperation( Task task, PrevalidateNodeRemovalRequest request, ClusterState state, - ActionListener listener + ActionListener responseListener ) { - try { + ActionListener.run(responseListener, listener -> { Set requestNodes = resolveNodes(request, state.nodes()); doPrevalidation(request, requestNodes, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } + }); } public static Set resolveNodes(PrevalidateNodeRemovalRequest request, DiscoveryNodes discoveryNodes) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 48370ca2199ce..c6af59caa7f17 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -123,13 +123,11 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - try { - if (cancellableTask.notifyIfCancelled(listener) == false) { - listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); + ActionListener.run(listener, l -> { + if (cancellableTask.notifyIfCancelled(l) == false) { + l.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); } - } catch (Exception e) { - listener.onFailure(e); - } + }); } }, clusterState -> cancellableTask.isCancelled() || acceptableClusterStateOrFailedPredicate.test(clusterState)); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f2ee085e9ac19..66b365f6a092e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -255,7 +255,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing but // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that this on the second time through this method, // this path is never taken. 
- try { + ActionListener.run(listener, l -> { if (Assertions.ENABLED) { final boolean arePipelinesResolved = bulkRequest.requests() .stream() @@ -265,13 +265,11 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec assert arePipelinesResolved : bulkRequest; } if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, executorName, listener); + processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); + ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); } - } catch (Exception e) { - listener.onFailure(e); - } + }); return; } diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index 53674d8ac4ba3..7e778f8a5fd8d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -35,11 +35,7 @@ public ChannelActionListener(TransportChannel channel, String actionName, Reques @Override public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } + ActionListener.run(this, l -> l.channel.sendResponse(response)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java index d506e9cefa840..fcb079ef1fd83 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java @@ -78,13 +78,9 @@ protected void done(boolean success) { } private void executeListener(final ActionListener listener) { - try { - // we use a timeout of 0 to 
by pass assertion forbidding to call actionGet() (blocking) on a network thread. - // here we know we will never block - listener.onResponse(actionGet(0)); - } catch (Exception e) { - listener.onFailure(e); - } + // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread. + // here we know we will never block + ActionListener.completeWith(listener, () -> actionGet(0)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index de39117529647..4d3b9b0c15ff0 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -111,11 +111,7 @@ private TaskResultStoringActionListener(TaskManager taskManager, Task task, Acti @Override public void onResponse(Response response) { - try { - taskManager.storeResult(task, response, delegate); - } catch (Exception e) { - delegate.onFailure(e); - } + ActionListener.run(delegate, l -> taskManager.storeResult(task, response, l)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 7e0b636d0056b..6132b61a304cb 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -137,11 +137,7 @@ protected AsyncBroadcastAction(Task task, Request request, ActionListener(0), clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> newResponse(request, new AtomicReferenceArray(0), clusterState)); return; } // count the local operations, and perform the non local ones @@ -247,11 +243,7 @@ protected 
AtomicReferenceArray shardsResponses() { } protected void finishHim() { - try { - listener.onResponse(newResponse(request, shardsResponses, clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> newResponse(request, shardsResponses, clusterState)); } void setFailure(ShardIterator shardIt, int shardIndex, Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 4d0c914fa544d..82cc91e620d7e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -336,11 +336,7 @@ public void start() { cancellableTask.addListener(this); } if (nodeIds.size() == 0) { - try { - onCompletion(); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.run(listener, ignored -> onCompletion()); } else { int nodeIndex = -1; for (Map.Entry> entry : nodeIds.entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java index a02c5027d6df9..f2788d278c814 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java @@ -105,33 +105,24 @@ protected void done(boolean ignored) { } private void notifyListenerDirectly(ActionListener listener) { - try { - // call get in a non-blocking fashion as we could be on a network thread - // or another thread like the scheduler, which we should never block! 
- assert done; - V value = FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS); - listener.onResponse(value); - } catch (Exception e) { - listener.onFailure(e); - } + // call get in a non-blocking fashion as we could be on a network thread + // or another thread like the scheduler, which we should never block! + assert done; + ActionListener.completeWith(listener, () -> FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS)); } private void notifyListener(ActionListener listener, ExecutorService executorService) { - try { - executorService.execute(new Runnable() { - @Override - public void run() { - notifyListenerDirectly(listener); - } + ActionListener.run(listener, l -> executorService.execute(new Runnable() { + @Override + public void run() { + notifyListenerDirectly(l); + } - @Override - public String toString() { - return "ListenableFuture notification"; - } - }); - } catch (Exception e) { - listener.onFailure(e); - } + @Override + public String toString() { + return "ListenableFuture notification"; + } + })); } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 16b5a78bac4dd..d6b5ffd5f42d4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -344,9 +344,9 @@ private void snapshot( final IndexShardSnapshotStatus snapshotStatus, Version version, final long entryStartTime, - ActionListener listener + ActionListener resultListener ) { - try { + ActionListener.run(resultListener, listener -> { if (snapshotStatus.isAborted()) { throw new AbortedSnapshotException(); } @@ -387,9 +387,7 @@ private void snapshot( IOUtils.close(snapshotRef); throw e; } - } catch (Exception e) { - listener.onFailure(e); - } + }); } /** diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 6e24d97cef49c..179e4a7e21388 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -447,11 +447,7 @@ private void globalCheckpointAdvanced( final ActionListener listener ) { logger.trace("{} global checkpoint advanced to [{}] after waiting for [{}]", shardId, globalCheckpoint, request.getFromSeqNo()); - try { - super.asyncShardOperation(request, shardId, listener); - } catch (final IOException caught) { - listener.onFailure(caught); - } + ActionListener.run(listener, l -> super.asyncShardOperation(request, shardId, l)); } private void globalCheckpointAdvancementFailure( From 060382738b570c91c1d09dd82abf7b1d7d52d558 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 31 Jan 2023 09:19:23 +0000 Subject: [PATCH 25/38] [ML] Wait for _infer to work after restart in full cluster restart tests (#93327) --- .../restart/MLModelDeploymentFullClusterRestartIT.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 5a9e28274b84e..25a14c47e52c7 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -105,8 +105,10 @@ public void testDeploymentSurvivesRestart() throws Exception { request.addParameter("timeout", "70s"); })); 
waitForDeploymentStarted(modelId); - assertInfer(modelId); - assertNewInfer(modelId); + assertBusy(() -> { + assertInfer(modelId); + assertNewInfer(modelId); + }, 90, TimeUnit.SECONDS); stopDeployment(modelId); } } From 39ba013d944e07031b17e60e864054f25803090d Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Tue, 31 Jan 2023 11:31:36 +0200 Subject: [PATCH 26/38] Unpromotables skip replication and peer recovery (#93210) For skipping replication: * ReplicationTracker and Group filter shards that are promotable to primary * Remove unpromotable shards from in sync allocations in metadata * There is a new Refresh action for unpromotable replica shards Fixes ES-4861 For skipping peer recovery: * Unpromotable shards pass directly to STARTED skipping some intermediate peer recovery stages and messages Fixes ES-5257 --- docs/changelog/93210.yaml | 5 + .../cluster/routing/ShardRoutingRoleIT.java | 173 +++++++++++++++++- .../refresh/ReplicaShardRefreshRequest.java | 58 ------ .../refresh/TransportShardRefreshAction.java | 80 +++++--- ...ansportUnpromotableShardRefreshAction.java | 47 +++++ .../UnpromotableShardRefreshRequest.java | 59 ++++++ .../replication/ReplicationOperation.java | 1 + .../cluster/routing/IndexRoutingTable.java | 4 +- .../routing/IndexShardRoutingTable.java | 14 +- .../allocation/IndexMetadataUpdater.java | 14 +- .../index/seqno/ReplicationTracker.java | 23 ++- .../elasticsearch/index/shard/IndexShard.java | 28 ++- .../index/shard/ReplicationGroup.java | 5 +- .../recovery/PeerRecoveryTargetService.java | 83 +++++---- .../recovery/RecoveriesCollection.java | 1 + ...portVerifyShardBeforeCloseActionTests.java | 2 +- ...TransportResyncReplicationActionTests.java | 2 +- .../ReplicationOperationTests.java | 2 +- .../TransportReplicationActionTests.java | 7 +- .../cluster/ClusterStateTests.java | 6 +- .../metadata/AutoExpandReplicasTests.java | 8 +- .../index/engine/EngineTestCase.java | 2 +- 22 files changed, 464 insertions(+), 160 deletions(-) create 
mode 100644 docs/changelog/93210.yaml delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java diff --git a/docs/changelog/93210.yaml b/docs/changelog/93210.yaml new file mode 100644 index 0000000000000..179f4ab9dec8d --- /dev/null +++ b/docs/changelog/93210.yaml @@ -0,0 +1,5 @@ +pr: 93210 +summary: Unpromotables skip replication and peer recovery +area: Allocation +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 5b47e6d08acc4..2f186a41139b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -10,10 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; @@ -28,6 +31,7 @@ import org.elasticsearch.index.IndexService; import 
org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.NoOpEngine; import org.elasticsearch.index.shard.IndexShard; @@ -39,6 +43,8 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Arrays; @@ -46,16 +52,22 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @SuppressWarnings("resource") public class ShardRoutingRoleIT extends ESIntegTestCase { @@ -65,6 +77,7 @@ public class ShardRoutingRoleIT extends ESIntegTestCase { public static class TestPlugin extends Plugin implements ClusterPlugin, EnginePlugin { volatile int numIndexingCopies = 1; + static final String NODE_ATTR_UNPROMOTABLE_ONLY = "unpromotableonly"; @Override public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() { @@ -93,12 +106,55 @@ public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode n } return super.canForceAllocatePrimary(shardRouting, node, 
allocation); } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + var nodesWithUnpromotableOnly = allocation.getClusterState() + .nodes() + .stream() + .filter(n -> Objects.equals("true", n.getAttributes().get(NODE_ATTR_UNPROMOTABLE_ONLY))) + .map(DiscoveryNode::getName) + .collect(Collectors.toUnmodifiableSet()); + if (nodesWithUnpromotableOnly.isEmpty() == false) { + if (nodesWithUnpromotableOnly.contains(node.node().getName())) { + if (shardRouting.isPromotableToPrimary()) { + return allocation.decision( + Decision.NO, + "test", + "shard is promotable to primary so may not be assigned to [" + node.node().getName() + "]" + ); + } + } else { + if (shardRouting.isPromotableToPrimary() == false) { + return allocation.decision( + Decision.NO, + "test", + "shard is not promotable to primary so may not be assigned to [" + node.node().getName() + "]" + ); + } + } + } + return Decision.YES; + } }); } @Override public Optional getEngineFactory(IndexSettings indexSettings) { - return Optional.of(config -> config.isPromotableToPrimary() ? 
new InternalEngine(config) : new NoOpEngine(config)); + return Optional.of(config -> { + if (config.isPromotableToPrimary()) { + return new InternalEngine(config); + } else { + try { + config.getStore().createEmpty(); + } catch (IOException e) { + logger.error("Error creating empty store", e); + throw new RuntimeException(e); + } + + return new NoOpEngine(EngineTestCase.copy(config, () -> -1L)); + } + }); } } @@ -109,7 +165,7 @@ protected boolean addMockInternalEngine() { @Override protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), TestPlugin.class); + return CollectionUtils.concatLists(List.of(MockTransportService.TestPlugin.class, TestPlugin.class), super.nodePlugins()); } @Override @@ -193,11 +249,32 @@ private static void assertRolesInRoutingTableXContent(ClusterState state) { } } - public void testShardCreation() { + private static void installMockTransportVerifications(RoutingTableWatcher routingTableWatcher) { + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (routingTableWatcher.numIndexingCopies == 1) { + assertThat("no recovery action should be exchanged", action, not(startsWith("internal:index/shard/recovery/"))); + assertThat("no replicated action should be exchanged", action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + mockTransportService.addRequestHandlingBehavior( + TransportUnpromotableShardRefreshAction.NAME, + (handler, request, channel, task) -> { + // Skip handling the request and send an immediate empty response + channel.sendResponse(ActionResponse.Empty.INSTANCE); + } + ); + } + } + + public void testShardCreation() throws Exception { var routingTableWatcher = new RoutingTableWatcher(); var numDataNodes = 
routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -234,6 +311,7 @@ public void testShardCreation() { ensureGreen(INDEX_NAME); assertEngineTypes(); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); // removing replicas drops SEARCH_ONLY copies first while (routingTableWatcher.numReplicas > 0) { @@ -341,6 +419,7 @@ public void testPromotion() { var numDataNodes = routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -399,7 +478,7 @@ public AllocationCommand getCancelPrimaryCommand() { return null; } - public void testSearchRouting() { + public void testSearchRouting() throws Exception { var routingTableWatcher = new RoutingTableWatcher(); routingTableWatcher.numReplicas = Math.max(1, routingTableWatcher.numReplicas); @@ -407,6 +486,7 @@ public void testSearchRouting() { getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numReplicas + 1); + installMockTransportVerifications(routingTableWatcher); final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); try { @@ -414,7 +494,7 @@ public void testSearchRouting() { masterClusterService.addListener(routingTableWatcher); createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); - // TODO index some documents here once recovery/replication ignore unpromotable shards + 
indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); ensureGreen(INDEX_NAME); assertEngineTypes(); @@ -483,6 +563,7 @@ public void testClosedIndex() { var numDataNodes = routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -501,4 +582,86 @@ public void testClosedIndex() { masterClusterService.removeListener(routingTableWatcher); } } + + public void testRefreshOfUnpromotableShards() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + final AtomicInteger refreshUnpromotableActions = new AtomicInteger(0); + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.startsWith(TransportUnpromotableShardRefreshAction.NAME)) { + refreshUnpromotableActions.incrementAndGet(); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex( + INDEX_NAME, + Settings.builder() + .put(routingTableWatcher.getIndexSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false) + 
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) + .build() + ); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + indexRandom(true, INDEX_NAME, randomIntBetween(1, 10)); + + // Each primary will send a TransportUnpromotableShardRefreshAction to each of the unpromotable replica shards + assertThat( + refreshUnpromotableActions.get(), + is(equalTo((routingTableWatcher.numReplicas - (routingTableWatcher.numIndexingCopies - 1)) * routingTableWatcher.numShards)) + ); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + public void testNodesWithUnpromotableShardsNeverGetReplicationActions() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + var additionalNumberOfNodesWithUnpromotableShards = randomIntBetween(1, 3); + routingTableWatcher.numReplicas = routingTableWatcher.numIndexingCopies + additionalNumberOfNodesWithUnpromotableShards - 1; + internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numIndexingCopies + 1); + final List nodesWithUnpromotableOnly = internalCluster().startDataOnlyNodes( + additionalNumberOfNodesWithUnpromotableShards, + Settings.builder().put("node.attr." 
+ TestPlugin.NODE_ATTR_UNPROMOTABLE_ONLY, "true").build() + ); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (nodesWithUnpromotableOnly.contains(connection.getNode().getName())) { + assertThat(action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + ensureGreen(INDEX_NAME); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java deleted file mode 100644 index a10d03bf30c10..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.tasks.TaskId; - -import java.io.IOException; - -public class ReplicaShardRefreshRequest extends ReplicationRequest { - - @Nullable - private final Long segmentGeneration; - - public ReplicaShardRefreshRequest(ShardId shardId, TaskId parentTaskId, @Nullable Long segmentGeneration) { - super(shardId); - setParentTask(parentTaskId); - this.segmentGeneration = segmentGeneration; - } - - public ReplicaShardRefreshRequest(StreamInput in) throws IOException { - super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - this.segmentGeneration = in.readOptionalVLong(); - } else { - this.segmentGeneration = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - out.writeOptionalVLong(segmentGeneration); - } - } - - @Nullable - public Long getSegmentGeneration() { - return segmentGeneration; - } - - @Override - public String toString() { - return "ReplicaShardRefreshRequest{" + shardId + '}'; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 53c83a99183d8..c7e7ab9733827 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -9,30 +9,38 @@ package 
org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.function.Predicate; +import java.util.stream.Collectors; public class TransportShardRefreshAction extends TransportReplicationAction< BasicReplicationRequest, - ReplicaShardRefreshRequest, + BasicReplicationRequest, ReplicationResponse> { private static final Logger logger = LogManager.getLogger(TransportShardRefreshAction.class); @@ -41,8 +49,6 @@ public class TransportShardRefreshAction extends TransportReplicationAction< public static final ActionType TYPE = new 
ActionType<>(NAME, ReplicationResponse::new); public static final String SOURCE_API = "api"; - private final Settings settings; - @Inject public TransportShardRefreshAction( Settings settings, @@ -63,10 +69,10 @@ public TransportShardRefreshAction( shardStateAction, actionFilters, BasicReplicationRequest::new, - ReplicaShardRefreshRequest::new, + BasicReplicationRequest::new, ThreadPool.Names.REFRESH ); - this.settings = settings; + new TransportUnpromotableShardRefreshAction(transportService, actionFilters, indicesService); } @Override @@ -78,31 +84,53 @@ protected ReplicationResponse newResponseInstance(StreamInput in) throws IOExcep protected void shardOperationOnPrimary( BasicReplicationRequest shardRequest, IndexShard primary, - ActionListener> listener + ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { + try (var listeners = new RefCountingListener(listener.map(v -> new PrimaryResult<>(shardRequest, new ReplicationResponse())))) { var refreshResult = primary.refresh(SOURCE_API); logger.trace("{} refresh request executed on primary", primary.shardId()); - var shardRefreshRequest = new ReplicaShardRefreshRequest( - primary.shardId(), - shardRequest.getParentTask(), - refreshResult.generation() - ); - return new PrimaryResult<>(shardRefreshRequest, new ReplicationResponse()); - }); + + // Forward the request to all nodes that hold unpromotable replica shards + final ClusterState clusterState = clusterService.state(); + final Task parentTaskId = taskManager.getTask(shardRequest.getParentTask().getId()); + clusterState.routingTable() + .shardRoutingTable(shardRequest.shardId()) + .assignedShards() + .stream() + .filter(Predicate.not(ShardRouting::isPromotableToPrimary)) + .map(ShardRouting::currentNodeId) + .collect(Collectors.toUnmodifiableSet()) + .forEach(nodeId -> { + final DiscoveryNode node = clusterState.nodes().get(nodeId); + UnpromotableShardRefreshRequest request = new UnpromotableShardRefreshRequest( + primary.shardId(), + 
refreshResult.generation() + ); + logger.trace("forwarding refresh request [{}] to node [{}]", request, node); + transportService.sendChildRequest( + node, + TransportUnpromotableShardRefreshAction.NAME, + request, + parentTaskId, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + listeners.acquire(ignored -> {}), + (in) -> TransportResponse.Empty.INSTANCE, + ThreadPool.Names.REFRESH + ) + ); + }); + } catch (Exception e) { + listener.onFailure(e); + } } @Override - protected void shardOperationOnReplica(ReplicaShardRefreshRequest request, IndexShard replica, ActionListener listener) { - if (DiscoveryNode.isStateless(settings) && replica.routingEntry().isPromotableToPrimary() == false) { - assert request.getSegmentGeneration() != Engine.RefreshResult.UNKNOWN_GENERATION; - replica.waitForSegmentGeneration(request.getSegmentGeneration(), listener.map(l -> new ReplicaResult())); - } else { - ActionListener.completeWith(listener, () -> { - replica.refresh(SOURCE_API); - logger.trace("{} refresh request executed on replica", replica.shardId()); - return new ReplicaResult(); - }); - } + protected void shardOperationOnReplica(BasicReplicationRequest request, IndexShard replica, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + replica.refresh(SOURCE_API); + logger.trace("{} refresh request executed on replica", replica.shardId()); + return new ReplicaResult(); + }); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java new file mode 100644 index 0000000000000..500a53513a60b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportUnpromotableShardRefreshAction extends HandledTransportAction { + public static final String NAME = RefreshAction.NAME + "[u]"; + + private final IndicesService indicesService; + + @Inject + public TransportUnpromotableShardRefreshAction( + TransportService transportService, + ActionFilters actionFilters, + IndicesService indicesService + ) { + super(NAME, transportService, actionFilters, UnpromotableShardRefreshRequest::new, ThreadPool.Names.REFRESH); + this.indicesService = indicesService; + } + + @Override + protected void doExecute(Task task, UnpromotableShardRefreshRequest request, ActionListener responseListener) { + ActionListener.run(responseListener, listener -> { + assert request.getSegmentGeneration() != Engine.RefreshResult.UNKNOWN_GENERATION + : "The request segment is " + request.getSegmentGeneration(); + IndexShard shard = indicesService.indexServiceSafe(request.getShardId().getIndex()).getShard(request.getShardId().id()); + shard.waitForSegmentGeneration(request.getSegmentGeneration(), listener.map(l -> ActionResponse.Empty.INSTANCE)); + 
}); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java new file mode 100644 index 0000000000000..52ef3917ce722 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +public class UnpromotableShardRefreshRequest extends ActionRequest { + + private final ShardId shardId; + private final long segmentGeneration; + + public UnpromotableShardRefreshRequest(final ShardId shardId, long segmentGeneration) { + this.shardId = shardId; + this.segmentGeneration = segmentGeneration; + } + + public UnpromotableShardRefreshRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + segmentGeneration = in.readVLong(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardId.writeTo(out); + out.writeVLong(segmentGeneration); + } + + public ShardId getShardId() { + return shardId; + } + + public long getSegmentGeneration() { + return 
segmentGeneration; + } + + @Override + public String toString() { + return "UnpromotableShardRefreshRequest{" + "shardId=" + shardId + ", segmentGeneration=" + segmentGeneration + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 8ec274bc410f6..6b1916b4ec843 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -213,6 +213,7 @@ private void performOnReplica( final long maxSeqNoOfUpdatesOrDeletes, final PendingReplicationActions pendingReplicationActions ) { + assert shard.isPromotableToPrimary() : "only promotable shards should receive replication requests"; if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index a26e36aa39f9b..0c62dce1b2209 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -136,7 +136,9 @@ boolean validate(Metadata metadata) { ); } final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(shardRouting.id()); - if (shardRouting.active() && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { + if (shardRouting.active() + && shardRouting.isPromotableToPrimary() + && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { throw new IllegalStateException( "active shard routing " + shardRouting diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 0dd85d873463d..3a5a369caa3f2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -499,15 +499,17 @@ public ShardRouting getByAllocationId(String allocationId) { return null; } - public Set getAllAllocationIds() { + public Set getPromotableAllocationIds() { assert MasterService.assertNotMasterUpdateThread("not using this on the master thread so we don't have to pre-compute this"); Set allAllocationIds = new HashSet<>(); for (ShardRouting shard : shards) { - if (shard.relocating()) { - allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); - } - if (shard.assignedToNode()) { - allAllocationIds.add(shard.allocationId().getId()); + if (shard.isPromotableToPrimary()) { + if (shard.relocating()) { + allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); + } + if (shard.assignedToNode()) { + allAllocationIds.add(shard.allocationId().getId()); + } } } return allAllocationIds; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java index 469e7f7efe36c..e0b53e312e400 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -69,12 +69,14 @@ public void shardStarted(ShardRouting initializingShard, ShardRouting startedSha + "] and startedShard.allocationId [" + startedShard.allocationId().getId() + "] have to have the same"; - Updates updates = changes(startedShard.shardId()); - updates.addedAllocationIds.add(startedShard.allocationId().getId()); - if (startedShard.primary() - // started shard has to have null 
recoverySource; have to pick up recoverySource from its initializing state - && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { - updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + if (startedShard.isPromotableToPrimary()) { + Updates updates = changes(startedShard.shardId()); + updates.addedAllocationIds.add(startedShard.allocationId().getId()); + if (startedShard.primary() + // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state + && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { + updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 86290ca79a65a..12ae735d16b55 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -263,6 +263,7 @@ public synchronized RetentionLeases getRetentionLeases(final boolean expireLease final long retentionLeaseMillis = indexSettings.getRetentionLeaseMillis(); final Set leaseIdsForCurrentPeers = routingTable.assignedShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .map(ReplicationTracker::getPeerRecoveryRetentionLeaseId) .collect(Collectors.toSet()); final boolean allShardsStarted = routingTable.allShardsStarted(); @@ -607,7 +608,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { boolean renewalNeeded = false; for (int copy = 0; copy < routingTable.size(); copy++) { final ShardRouting shardRouting = routingTable.shard(copy); - if (shardRouting.assignedToNode() == false) { + if (shardRouting.assignedToNode() == false || 
shardRouting.isPromotableToPrimary() == false) { continue; } final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); @@ -628,7 +629,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { if (renewalNeeded) { for (int copy = 0; copy < routingTable.size(); copy++) { final ShardRouting shardRouting = routingTable.shard(copy); - if (shardRouting.assignedToNode()) { + if (shardRouting.assignedToNode() && shardRouting.isPromotableToPrimary()) { final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); if (retentionLease != null) { final CheckpointState checkpointState = checkpoints.get(shardRouting.allocationId().getId()); @@ -874,8 +875,15 @@ private boolean invariant() { assert replicationGroup == null || replicationGroup.equals(calculateReplicationGroup()) : "cached replication group out of sync: expected: " + calculateReplicationGroup() + " but was: " + replicationGroup; + if (replicationGroup != null) { + assert replicationGroup.getReplicationTargets().stream().allMatch(ShardRouting::isPromotableToPrimary) + : "expected all replication target shards of the replication group to be promotable to primary"; + assert replicationGroup.getSkippedShards().stream().allMatch(ShardRouting::isPromotableToPrimary) + : "expected all skipped shards of the replication group to be promotable to primary"; + } + // all assigned shards from the routing table are tracked - assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getAllAllocationIds()) + assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getPromotableAllocationIds()) : "local checkpoints " + checkpoints + " not in-sync with routing table " + routingTable; for (Map.Entry entry : checkpoints.entrySet()) { @@ -895,7 +903,7 @@ private boolean invariant() { if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { // all tracked shard 
copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { - if (checkpoints.get(shardRouting.allocationId().getId()).tracked) { + if (shardRouting.isPromotableToPrimary() && checkpoints.get(shardRouting.allocationId().getId()).tracked) { assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( @@ -1151,6 +1159,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { } else if (hasAllPeerRecoveryRetentionLeases == false && routingTable.assignedShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .allMatch( shardRouting -> retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) || checkpoints.get(shardRouting.allocationId().getId()).tracked == false @@ -1185,6 +1194,7 @@ public synchronized void updateFromMaster( // remove entries which don't exist on master Set initializingAllocationIds = routingTable.getAllInitializingShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .map(ShardRouting::allocationId) .map(AllocationId::getId) .collect(Collectors.toSet()); @@ -1495,7 +1505,10 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { */ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { - final List shardRoutings = routingTable.assignedShards(); + final List shardRoutings = routingTable.assignedShards() + .stream() + .filter(ShardRouting::isPromotableToPrimary) + .toList(); final GroupedActionListener groupedActionListener = new GroupedActionListener<>( shardRoutings.size(), ActionListener.wrap(vs -> { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 280934c73364f..871928a96e4b6 
100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedConsumer; @@ -709,17 +708,26 @@ public void onFailure(Exception e) { if (indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery == false) { final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases(); - final Set shardRoutings = Sets.newHashSetWithExpectedSize(routingTable.size()); + boolean allShardsUseRetentionLeases = true; for (int copy = 0; copy < routingTable.size(); copy++) { - shardRoutings.add(routingTable.shard(copy)); - } - shardRoutings.addAll(routingTable.assignedShards()); // include relocation targets - if (shardRoutings.stream() - .allMatch( - shr -> shr.assignedToNode() && retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shr)) - )) { - useRetentionLeasesInPeerRecovery = true; + ShardRouting shardRouting = routingTable.shard(copy); + if (shardRouting.isPromotableToPrimary()) { + if (shardRouting.assignedToNode() == false + || retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting)) == false) { + allShardsUseRetentionLeases = false; + break; + } + if (this.shardRouting.relocating()) { + ShardRouting shardRoutingReloc = this.shardRouting.getTargetRelocatingShard(); + if (shardRoutingReloc.assignedToNode() == false + || retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRoutingReloc)) == false) { + allShardsUseRetentionLeases = false; + break; + } + } + } } + 
useRetentionLeasesInPeerRecovery = allShardsUseRetentionLeases; } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java index cf3b8fc0fbaf3..53f932faf4512 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java @@ -40,11 +40,14 @@ public ReplicationGroup( this.trackedAllocationIds = trackedAllocationIds; this.version = version; - this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getAllAllocationIds()); + this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getPromotableAllocationIds()); this.replicationTargets = new ArrayList<>(); this.skippedShards = new ArrayList<>(); for (int copy = 0; copy < routingTable.size(); copy++) { ShardRouting shard = routingTable.shard(copy); + if (shard.isPromotableToPrimary() == false) { + continue; + } if (shard.unassigned()) { assert shard.primary() == false : "primary shard should not be unassigned in a replication group: " + shard; skippedShards.add(shard); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 1d3120310f0c9..abd1ef4aaf958 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -216,43 +216,62 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi } final RecoveryTarget recoveryTarget = recoveryRef.target(); assert recoveryTarget.sourceNode() != null : "cannot do a recovery without a source node"; - final RecoveryState.Timer timer = recoveryTarget.state().getTimer(); + final RecoveryState recoveryState = recoveryTarget.state(); + final 
RecoveryState.Timer timer = recoveryState.getTimer(); + final IndexShard indexShard = recoveryTarget.indexShard(); + + final var failureHandler = ActionListener.notifyOnce(ActionListener.runBefore(ActionListener.noop().delegateResponse((l, e) -> { + // this will be logged as warning later on... + logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + onGoingRecoveries.failRecovery( + recoveryId, + new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), + true + ); + }), recoveryRef::close)); - record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} - final ActionListener toSendListener = ActionListener.notifyOnce( - ActionListener.runBefore(new ActionListener<>() { - @Override - public void onResponse(StartRecoveryRequestToSend r) { - logger.trace( - "{} [{}]: recovery from {}", - r.startRecoveryRequest().shardId(), - r.actionName(), - r.startRecoveryRequest().sourceNode() - ); - transportService.sendRequest( - r.startRecoveryRequest().sourceNode(), - r.actionName(), - r.requestToSend(), - new RecoveryResponseHandler(r.startRecoveryRequest(), timer) - ); - } + if (indexShard.routingEntry().isPromotableToPrimary() == false) { + assert preExistingRequest == null; + assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; + try { + indexShard.preRecovery(failureHandler.map(v -> { + logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); + indexShard.prepareForIndexRecovery(); + // Skip unnecessary intermediate stages + recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); + recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + indexShard.openEngineAndSkipTranslogRecovery(); + recoveryState.getIndex().setFileDetailsComplete(); + recoveryState.setStage(RecoveryState.Stage.FINALIZE); + onGoingRecoveries.markRecoveryAsDone(recoveryId); + return null; + })); + 
} catch (Exception e) { + failureHandler.onFailure(e); + } - @Override - public void onFailure(Exception e) { - // this will be logged as warning later on... - logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); - onGoingRecoveries.failRecovery( - recoveryId, - new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), - true - ); - } - }, recoveryRef::close) - ); + return; + } + + record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} + final ActionListener toSendListener = failureHandler.map(r -> { + logger.trace( + "{} [{}]: recovery from {}", + r.startRecoveryRequest().shardId(), + r.actionName(), + r.startRecoveryRequest().sourceNode() + ); + transportService.sendRequest( + r.startRecoveryRequest().sourceNode(), + r.actionName(), + r.requestToSend(), + new RecoveryResponseHandler(r.startRecoveryRequest(), timer) + ); + return null; + }); if (preExistingRequest == null) { try { - final IndexShard indexShard = recoveryTarget.indexShard(); indexShard.preRecovery(toSendListener.delegateFailure((l, v) -> ActionListener.completeWith(l, () -> { logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index d896425eef2cf..59ed1ba2b871f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -164,6 +164,7 @@ public RecoveryRef getRecoverySafe(long id, ShardId shardId) { throw new IndexShardClosedException(shardId); } assert recoveryRef.target().shardId().equals(shardId); + assert recoveryRef.target().indexShard().routingEntry().isPromotableToPrimary(); 
return recoveryRef; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 8dd0e89e1cbbe..cf096e35bdbc0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -234,7 +234,7 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { final long primaryTerm = indexMetadata.primaryTerm(0); final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(0); - final Set trackedShards = shardRoutingTable.getAllAllocationIds(); + final Set trackedShards = shardRoutingTable.getPromotableAllocationIds(); List unavailableShards = randomSubsetOf(randomIntBetween(1, nbReplicas), shardRoutingTable.replicaShards()); IndexShardRoutingTable.Builder shardRoutingTableBuilder = new IndexShardRoutingTable.Builder(shardRoutingTable); diff --git a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java index 4a3498ea6baae..919737caf2c7a 100644 --- a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java @@ -152,7 +152,7 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { new ReplicationGroup( shardRoutingTable, clusterService.state().metadata().index(index).inSyncAllocationIds(shardId.id()), - shardRoutingTable.getAllAllocationIds(), + shardRoutingTable.getPromotableAllocationIds(), 0 ) ); diff --git 
a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 8dab09fb6015f..543b673635ee0 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -507,7 +507,7 @@ public void testPrimaryFailureHandlingReplicaResponse() throws Exception { final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(0); final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); - final Set trackedShards = shardRoutingTable.getAllAllocationIds(); + final Set trackedShards = shardRoutingTable.getPromotableAllocationIds(); final ReplicationGroup initialReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0); final Thread testThread = Thread.currentThread(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 97605ec71928f..e64dddff3cdd3 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -928,7 +928,12 @@ public void testSeqNoIsSetOnPrimary() { Set inSyncIds = randomBoolean() ? 
singleton(routingEntry.allocationId().getId()) : clusterService.state().metadata().index(index).inSyncAllocationIds(0); - ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, shardRoutingTable.getAllAllocationIds(), 0); + ReplicationGroup replicationGroup = new ReplicationGroup( + shardRoutingTable, + inSyncIds, + shardRoutingTable.getPromotableAllocationIds(), + 0 + ); when(shard.getReplicationGroup()).thenReturn(replicationGroup); PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); replicationActions.accept(replicationGroup); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index e4c13a13a16ba..f26caf2f98e5e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -134,7 +134,7 @@ public void testToXContent() throws IOException { IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -374,7 +374,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); @@ -606,7 +606,7 
@@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index fedbc31fcdeb7..c82b13918835e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -162,7 +162,11 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE postTable = state.routingTable().index("index").shard(0); assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted()); - assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(is(in(preTable.getAllAllocationIds())))); + assertThat( + postTable.toString(), + postTable.getPromotableAllocationIds(), + everyItem(is(in(preTable.getPromotableAllocationIds()))) + ); } else { // fake an election where conflicting nodes are removed and readded state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build(); @@ -199,7 +203,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE .map(shr -> shr.allocationId().getId()) .collect(Collectors.toSet()); - assertThat(postTable.toString(), unchangedAllocationIds, everyItem(is(in(postTable.getAllAllocationIds())))); + assertThat(postTable.toString(), unchangedAllocationIds, 
everyItem(is(in(postTable.getPromotableAllocationIds())))); RoutingNodesHelper.asStream(postTable).forEach(shardRouting -> { if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 277dfeb913525..68b4f18fbcfd2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -249,7 +249,7 @@ public static FieldType randomIdFieldType() { return randomBoolean() ? ProvidedIdFieldMapper.Defaults.FIELD_TYPE : TsidExtractingIdFieldMapper.FIELD_TYPE; } - public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSupplier) { + public static EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSupplier) { return new EngineConfig( config.getShardId(), config.getThreadPool(), From 3d38173e567d1dce31df6811ae5f8826f87dfe25 Mon Sep 17 00:00:00 2001 From: Marwane Chahoud Date: Tue, 31 Jan 2023 10:42:49 +0100 Subject: [PATCH 27/38] Add a section about token-based authentication (#93344) * Add a section about token-based authentication It took me a considerable time to figure out the syntax for a token-based authentication, and I said why not add it to the documentation * Update x-pack/docs/en/watcher/input/http.asciidoc * Update x-pack/docs/en/watcher/input/http.asciidoc --------- Co-authored-by: Abdon Pijpelink --- x-pack/docs/en/watcher/input/http.asciidoc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/x-pack/docs/en/watcher/input/http.asciidoc b/x-pack/docs/en/watcher/input/http.asciidoc index 023884dd8e53f..ca1738b5952d2 100644 --- a/x-pack/docs/en/watcher/input/http.asciidoc +++ b/x-pack/docs/en/watcher/input/http.asciidoc @@ -139,6 +139,28 @@ 
http://openweathermap.org/appid[OpenWeatherMap] service: -------------------------------------------------- // NOTCONSOLE +===== Using token-based authentication + +You can also call an API using a `Bearer token` instead of basic authentication. The `request.headers` object contains the HTTP headers: + +[source,js] +-------------------------------------------------- +"input" : { + "http" : { + "request" : { + "url": "https://api.example.com/v1/something", + "headers": { + "authorization" : "Bearer ABCD1234...", + "content-type": "application/json" + # other headers params.. + }, + "connection_timeout": "30s" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + ==== Using templates The `http` input supports templating. You can use <> when From 71c280b85d1cd58d10ca6a50e4fd5cb923b0ad26 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 31 Jan 2023 11:37:31 +0100 Subject: [PATCH 28/38] Build role for remote access authentication (#93316) This PR adds support for building roles for remote_access authentication instances, under the new remote cluster security model. This change is stand-alone and not wired up to active code flows yet. A proof of concept in #92089 highlights how the model change in this PR fits into the broader context of the fulfilling cluster processing cross cluster requests. 
--- .../authc/RemoteAccessAuthentication.java | 3 + .../xpack/core/security/authc/Subject.java | 26 +++- .../authz/RoleDescriptorsIntersection.java | 4 + .../security/authz/store/RoleReference.java | 33 ++++ .../authz/store/RoleReferenceResolver.java | 5 + .../authc/AuthenticationTestHelper.java | 55 +++---- .../core/security/authc/SubjectTests.java | 79 ++++++++++ .../authz/store/RoleReferenceTests.java | 13 ++ .../authz/store/RoleDescriptorStore.java | 15 ++ .../authz/store/CompositeRolesStoreTests.java | 141 ++++++++++++++++++ 10 files changed, 346 insertions(+), 28 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java index 089dd2be2a77f..d6c50bf66bd15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.AbstractBytesReference; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -148,6 +149,8 @@ public Map copyWithRemoteAccessEntries(final Map } public static final class RoleDescriptorsBytes extends AbstractBytesReference { + + public static final RoleDescriptorsBytes EMPTY = new RoleDescriptorsBytes(new BytesArray("{}")); private final BytesReference rawBytes; public RoleDescriptorsBytes(BytesReference rawBytes) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java index 7329db1d17996..5ff7d9749d2b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java @@ -19,6 +19,8 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.User; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -105,8 +107,7 @@ public RoleReferenceIntersection getRoleReferenceIntersection(@Nullable Anonymou case SERVICE_ACCOUNT: return new RoleReferenceIntersection(new RoleReference.ServiceAccountRoleReference(user.principal())); case REMOTE_ACCESS: - assert false : "unsupported subject type: [" + type + "]"; - throw new UnsupportedOperationException("unsupported subject type: [" + type + "]"); + return buildRoleReferencesForRemoteAccess(); default: assert false : "unknown subject type: [" + type + "]"; throw new IllegalStateException("unknown subject type: [" + type + "]"); @@ -231,6 +232,27 @@ private RoleReferenceIntersection buildRoleReferencesForApiKey() { ); } + private RoleReferenceIntersection buildRoleReferencesForRemoteAccess() { + final List roleReferences = new ArrayList<>(4); + @SuppressWarnings("unchecked") + final List remoteAccessRoleDescriptorsBytes = (List< + RemoteAccessAuthentication.RoleDescriptorsBytes>) metadata.get(AuthenticationField.REMOTE_ACCESS_ROLE_DESCRIPTORS_KEY); + if (remoteAccessRoleDescriptorsBytes.isEmpty()) { + // If the remote access role descriptors are empty, the remote user has no privileges. 
We need to add an empty role to restrict + // access of the overall intersection accordingly + roleReferences.add(new RoleReference.RemoteAccessRoleReference(RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY)); + } else { + // TODO handle this once we support API keys as querying subjects + assert remoteAccessRoleDescriptorsBytes.size() == 1 + : "only a singleton list of remote access role descriptors bytes is supported"; + for (RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes : remoteAccessRoleDescriptorsBytes) { + roleReferences.add(new RoleReference.RemoteAccessRoleReference(roleDescriptorsBytes)); + } + } + roleReferences.addAll(buildRoleReferencesForApiKey().getRoleReferences()); + return new RoleReferenceIntersection(List.copyOf(roleReferences)); + } + private static boolean isEmptyRoleDescriptorsBytes(BytesReference roleDescriptorsBytes) { return roleDescriptorsBytes == null || (roleDescriptorsBytes.length() == 2 && "{}".equals(roleDescriptorsBytes.utf8ToString())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java index 16bf0a074c675..30139ae1b1dba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java @@ -26,6 +26,10 @@ public record RoleDescriptorsIntersection(Collection> roleDe public static RoleDescriptorsIntersection EMPTY = new RoleDescriptorsIntersection(Collections.emptyList()); + public RoleDescriptorsIntersection(RoleDescriptor roleDescriptor) { + this(List.of(Set.of(roleDescriptor))); + } + public RoleDescriptorsIntersection(StreamInput in) throws IOException { this(in.readImmutableList(inner -> inner.readSet(RoleDescriptor::new))); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java index 9ceeb724b4202..3a4a377713294 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.xpack.core.security.authc.RemoteAccessAuthentication; import java.util.HashSet; import java.util.List; @@ -116,6 +117,38 @@ public ApiKeyRoleType getRoleType() { } } + final class RemoteAccessRoleReference implements RoleReference { + + private final RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes; + private RoleKey id = null; + + public RemoteAccessRoleReference(RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes) { + this.roleDescriptorsBytes = roleDescriptorsBytes; + } + + @Override + public RoleKey id() { + // Hashing can be expensive. memorize the result in case the method is called multiple times. 
+ if (id == null) { + final String roleDescriptorsHash = MessageDigests.toHexString( + MessageDigests.digest(roleDescriptorsBytes, MessageDigests.sha256()) + ); + id = new RoleKey(Set.of("remote_access:" + roleDescriptorsHash), "remote_access"); + } + return id; + } + + @Override + public void resolve(RoleReferenceResolver resolver, ActionListener listener) { + resolver.resolveRemoteAccessRoleReference(this, listener); + } + + public RemoteAccessAuthentication.RoleDescriptorsBytes getRoleDescriptorsBytes() { + return roleDescriptorsBytes; + } + + } + /** * Same as {@link ApiKeyRoleReference} but for BWC purpose (prior to v7.9.0) */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java index 44522b5884521..e39b26afdacad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java @@ -25,4 +25,9 @@ void resolveBwcApiKeyRoleReference( ); void resolveServiceAccountRoleReference(ServiceAccountRoleReference roleReference, ActionListener listener); + + void resolveRemoteAccessRoleReference( + RoleReference.RemoteAccessRoleReference remoteAccessRoleReference, + ActionListener listener + ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java index 6dcf4c54ef98c..69f2b7d9ced1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java @@ -237,43 +237,46 @@ public static String 
randomInternalRoleName() { ); } - public static RemoteAccessAuthentication randomRemoteAccessAuthentication() { + public static RemoteAccessAuthentication randomRemoteAccessAuthentication(RoleDescriptorsIntersection roleDescriptorsIntersection) { try { // TODO add apikey() once we have querying-cluster-side API key support final Authentication authentication = ESTestCase.randomFrom( AuthenticationTestHelper.builder().realm(), AuthenticationTestHelper.builder().internal(SystemUser.INSTANCE) ).build(); - return new RemoteAccessAuthentication( - authentication, - new RoleDescriptorsIntersection( - List.of( - // TODO randomize to add a second set once we have querying-cluster-side API key support - Set.of( - new RoleDescriptor( - "a", - null, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder() - .indices("index1") - .privileges("read", "read_cross_cluster") - .build() }, - null, - null, - null, - null, - null, - null - ) - ) - ) - ) - ); + return new RemoteAccessAuthentication(authentication, roleDescriptorsIntersection); } catch (IOException e) { throw new UncheckedIOException(e); } } + public static RemoteAccessAuthentication randomRemoteAccessAuthentication() { + return randomRemoteAccessAuthentication( + new RoleDescriptorsIntersection( + List.of( + // TODO randomize to add a second set once we have querying-cluster-side API key support + Set.of( + new RoleDescriptor( + "_remote_user", + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("index1") + .privileges("read", "read_cross_cluster") + .build() }, + null, + null, + null, + null, + null, + null + ) + ) + ) + ) + ); + } + public static class AuthenticationTestBuilder { private TransportVersion transportVersion; private Authentication authenticatingAuthentication; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java index fefec2d24a7a9..7f575f29457b6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.xpack.core.security.authc.service.ServiceAccountSettings; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.authz.store.RoleReference.ApiKeyRoleReference; import org.elasticsearch.xpack.core.security.authz.store.RoleReference.BwcApiKeyRoleReference; @@ -35,7 +36,10 @@ import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_REALM_NAME; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_REALM_TYPE; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY; +import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.REMOTE_ACCESS_REALM_NAME; +import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.REMOTE_ACCESS_REALM_TYPE; import static org.elasticsearch.xpack.core.security.authc.Subject.FLEET_SERVER_ROLE_DESCRIPTOR_BYTES_V_7_14; +import static org.elasticsearch.xpack.core.security.authz.store.RoleReference.RemoteAccessRoleReference; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -145,6 +149,81 @@ public void testGetRoleReferencesForApiKey() { } } + public void testGetRoleReferencesForRemoteAccess() { + Map authMetadata = new HashMap<>(); + final String apiKeyId = randomAlphaOfLength(12); + 
authMetadata.put(AuthenticationField.API_KEY_ID_KEY, apiKeyId); + authMetadata.put(AuthenticationField.API_KEY_NAME_KEY, randomBoolean() ? null : randomAlphaOfLength(12)); + final BytesReference roleBytes = new BytesArray(""" + {"role":{"indices":[{"names":["index*"],"privileges":["read"]}]}}"""); + final BytesReference limitedByRoleBytes = new BytesArray(""" + {"limited-by-role":{"indices":[{"names":["*"],"privileges":["all"]}]}}"""); + + final boolean emptyRoleBytes = randomBoolean(); + + authMetadata.put( + AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, + emptyRoleBytes ? randomFrom(Arrays.asList(null, new BytesArray("{}"))) : roleBytes + ); + authMetadata.put(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, limitedByRoleBytes); + + final RemoteAccessAuthentication remoteAccessAuthentication = randomBoolean() + ? AuthenticationTestHelper.randomRemoteAccessAuthentication(RoleDescriptorsIntersection.EMPTY) + : AuthenticationTestHelper.randomRemoteAccessAuthentication(); + authMetadata = remoteAccessAuthentication.copyWithRemoteAccessEntries(authMetadata); + + final Subject subject = new Subject( + new User("joe"), + new Authentication.RealmRef(REMOTE_ACCESS_REALM_NAME, REMOTE_ACCESS_REALM_TYPE, "node"), + TransportVersion.CURRENT, + authMetadata + ); + + final RoleReferenceIntersection roleReferenceIntersection = subject.getRoleReferenceIntersection(getAnonymousUser()); + final List roleReferences = roleReferenceIntersection.getRoleReferences(); + if (emptyRoleBytes) { + assertThat(roleReferences, contains(isA(RemoteAccessRoleReference.class), isA(ApiKeyRoleReference.class))); + + final RemoteAccessRoleReference remoteAccessRoleReference = (RemoteAccessRoleReference) roleReferences.get(0); + assertThat( + remoteAccessRoleReference.getRoleDescriptorsBytes(), + equalTo( + remoteAccessAuthentication.getRoleDescriptorsBytesList().isEmpty() + ? 
RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY + : remoteAccessAuthentication.getRoleDescriptorsBytesList().get(0) + ) + ); + + final ApiKeyRoleReference roleReference = (ApiKeyRoleReference) roleReferences.get(1); + assertThat(roleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(roleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY))); + + } else { + assertThat( + roleReferences, + contains(isA(RemoteAccessRoleReference.class), isA(ApiKeyRoleReference.class), isA(ApiKeyRoleReference.class)) + ); + + final RemoteAccessRoleReference remoteAccessRoleReference = (RemoteAccessRoleReference) roleReferences.get(0); + assertThat( + remoteAccessRoleReference.getRoleDescriptorsBytes(), + equalTo( + remoteAccessAuthentication.getRoleDescriptorsBytesList().isEmpty() + ? RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY + : remoteAccessAuthentication.getRoleDescriptorsBytesList().get(0) + ) + ); + + final ApiKeyRoleReference roleReference = (ApiKeyRoleReference) roleReferences.get(1); + assertThat(roleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(roleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_ROLE_DESCRIPTORS_KEY))); + + final ApiKeyRoleReference limitedByRoleReference = (ApiKeyRoleReference) roleReferences.get(2); + assertThat(limitedByRoleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(limitedByRoleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY))); + } + } + public void testGetRoleReferencesForApiKeyBwc() { Map authMetadata = new HashMap<>(); final String apiKeyId = randomAlphaOfLength(12); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java index 35ba3e171d6c5..c10b01f59c2ae 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RemoteAccessAuthentication; import java.util.Set; @@ -65,6 +66,18 @@ public void testApiKeyRoleReference() { assertThat(roleKey.getSource(), equalTo("apikey_" + apiKeyRoleType)); } + public void testRemoteAccessRoleReference() { + final var roleDescriptorsBytes = new RemoteAccessAuthentication.RoleDescriptorsBytes(new BytesArray(randomAlphaOfLength(50))); + final var remoteAccessRoleReference = new RoleReference.RemoteAccessRoleReference(roleDescriptorsBytes); + + final RoleKey roleKey = remoteAccessRoleReference.id(); + assertThat( + roleKey.getNames(), + hasItem("remote_access:" + MessageDigests.toHexString(MessageDigests.digest(roleDescriptorsBytes, MessageDigests.sha256()))) + ); + assertThat(roleKey.getSource(), equalTo("remote_access")); + } + public void testServiceAccountRoleReference() { final String principal = randomAlphaOfLength(8) + "/" + randomAlphaOfLength(8); final RoleReference.ServiceAccountRoleReference serviceAccountRoleReference = new RoleReference.ServiceAccountRoleReference( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java index 756849193fe6c..acae8a3e255f1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java @@ -132,6 +132,21 @@ public void 
resolveServiceAccountRoleReference( })); } + @Override + public void resolveRemoteAccessRoleReference( + RoleReference.RemoteAccessRoleReference remoteAccessRoleReference, + ActionListener listener + ) { + final Set roleDescriptors = remoteAccessRoleReference.getRoleDescriptorsBytes().toRoleDescriptors(); + if (roleDescriptors.isEmpty()) { + listener.onResponse(RolesRetrievalResult.EMPTY); + return; + } + final RolesRetrievalResult rolesRetrievalResult = new RolesRetrievalResult(); + rolesRetrievalResult.addDescriptors(Set.copyOf(roleDescriptors)); + listener.onResponse(rolesRetrievalResult); + } + private void resolveRoleNames(Set roleNames, ActionListener listener) { roleDescriptors(roleNames, ActionListener.wrap(rolesRetrievalResult -> { logDeprecatedRoles(rolesRetrievalResult.getRoleDescriptors()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index c7ad83832a7ee..b418740b1f01c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.index.IndexAction; @@ -65,6 +66,7 @@ import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import 
org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; @@ -102,6 +104,7 @@ import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.service.ServiceAccountService; +import org.elasticsearch.xpack.security.authz.RBACEngine; import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.BaseMatcher; @@ -1978,6 +1981,144 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws Exception { assertThat(role.names()[0], containsString("user_role_")); } + public void testGetRoleForRemoteAccessAuthentication() throws Exception { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + doCallRealMethod().when(fileRolesStore).accept(anySet(), anyActionListener()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doCallRealMethod().when(nativeRolesStore).accept(anySet(), anyActionListener()); + when(fileRolesStore.roleDescriptors(anySet())).thenReturn(Collections.emptySet()); + doAnswer((invocationOnMock) -> { + @SuppressWarnings("unchecked") + ActionListener callback = (ActionListener) invocationOnMock.getArguments()[1]; + callback.onResponse(RoleRetrievalResult.failure(new RuntimeException("intentionally failed!"))); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isASet(), anyActionListener()); + final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + ThreadContext threadContext = new 
ThreadContext(SECURITY_ENABLED_SETTINGS); + final ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + ); + final ApiKeyService apiKeyService = spy( + new ApiKeyService( + SECURITY_ENABLED_SETTINGS, + Clock.systemUTC(), + mock(Client.class), + mock(SecurityIndexManager.class), + clusterService, + mock(CacheInvalidatorRegistry.class), + mock(ThreadPool.class) + ) + ); + final NativePrivilegeStore nativePrivStore = mock(NativePrivilegeStore.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener< + Collection>) invocationOnMock.getArguments()[2]; + listener.onResponse(Collections.emptyList()); + return Void.TYPE; + }).when(nativePrivStore).getPrivileges(anyCollection(), anyCollection(), anyActionListener()); + + final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore( + SECURITY_ENABLED_SETTINGS, + fileRolesStore, + nativeRolesStore, + reservedRolesStore, + nativePrivStore, + null, + apiKeyService, + null, + null, + effectiveRoleDescriptors::set + ); + AuditUtil.getOrGenerateRequestId(threadContext); + final TransportVersion version = TransportVersion.CURRENT; + final String apiKeyRoleName = "user_role_" + randomAlphaOfLength(4); + final Authentication apiKeyAuthentication = createApiKeyAuthentication( + apiKeyService, + randomValueOtherThanMany( + authc -> authc.getAuthenticationType() == AuthenticationType.API_KEY, + () -> AuthenticationTestHelper.builder().build() + ), + Collections.singleton( + new RoleDescriptor( + apiKeyRoleName, + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("index*").privileges("all").build() }, + null + ) + ), + null, + version + ); + final boolean emptyRemoteRole = randomBoolean(); + final Authentication 
authentication = apiKeyAuthentication.toRemoteAccess( + AuthenticationTestHelper.randomRemoteAccessAuthentication( + emptyRemoteRole + ? RoleDescriptorsIntersection.EMPTY + : new RoleDescriptorsIntersection( + new RoleDescriptor( + RBACEngine.REMOTE_USER_ROLE_NAME, + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("index1").privileges("read").build() }, + null, + null, + null, + null, + null, + null + ) + ) + ) + ); + + final PlainActionFuture roleFuture = new PlainActionFuture<>(); + compositeRolesStore.getRole(authentication.getEffectiveSubject(), roleFuture); + final Role role = roleFuture.actionGet(); + assertThat(effectiveRoleDescriptors.get(), is(nullValue())); + + verify(apiKeyService, times(1)).parseRoleDescriptorsBytes(anyString(), any(BytesReference.class), any()); + assertThat(role.names().length, is(1)); + assertThat(role.names()[0], equalTo(apiKeyRoleName)); + + // Smoke-test for authorization + final Metadata indexMetadata = Metadata.builder() + .put( + IndexMetadata.builder("index1") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + ) + ) + .put( + IndexMetadata.builder("index2") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + ) + ) + .build(); + final var emptyCache = new FieldPermissionsCache(Settings.EMPTY); + assertThat( + role.authorize(SearchAction.NAME, Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false == emptyRemoteRole) + ); + assertThat( + role.authorize(CreateIndexAction.NAME, Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false) + ); + assertThat( + role.authorize(SearchAction.NAME, 
Sets.newHashSet("index2"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false) + ); + } + public void testGetRolesForRunAs() { final ApiKeyService apiKeyService = mock(ApiKeyService.class); final ServiceAccountService serviceAccountService = mock(ServiceAccountService.class); From 0e87d582662bdc924f11ec299dec06baba9d6c64 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 31 Jan 2023 11:48:03 +0100 Subject: [PATCH 29/38] Cleanup allocation commands test (#93368) --- .../allocation/AllocationCommandsTests.java | 92 ++++--------------- .../cluster/ESAllocationTestCase.java | 4 - 2 files changed, 17 insertions(+), 79 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 19c2732c0867a..5785e040c616f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -59,8 +58,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; -import static org.elasticsearch.cluster.ClusterModule.BALANCED_ALLOCATOR; -import static org.elasticsearch.cluster.ClusterModule.DESIRED_BALANCE_ALLOCATOR; import static org.elasticsearch.cluster.routing.RoutingNodesHelper.shardsWithState; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -77,10 +74,7 @@ public class 
AllocationCommandsTests extends ESAllocationTestCase { public void testMoveShardCommand() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); logger.info("creating an index with 1 shard, no replica"); @@ -144,7 +138,6 @@ public void testAllocateCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); final String index = "test"; @@ -363,7 +356,6 @@ public void testAllocateStalePrimaryCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); final String index = "test"; @@ -427,7 +419,6 @@ public void testCancelCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); @@ -724,32 +715,7 @@ public void testSerialization() throws Exception { in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); // Now we can read them! 
- AllocationCommands sCommands = AllocationCommands.readFrom(in); - - assertThat(sCommands.commands().size(), equalTo(5)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(1)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).index(), equalTo("test")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(2)); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).index(), equalTo("test")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).node(), equalTo("node1")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(2)); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).index(), equalTo("test")); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node1")); - - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).shardId(), equalTo(3)); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).index(), equalTo("test")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).fromNode(), equalTo("node2")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).toNode(), equalTo("node3")); - - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).shardId(), equalTo(4)); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).index(), equalTo("test")); - assertThat(((CancelAllocationCommand) 
(sCommands.commands().get(4))).node(), equalTo("node5")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).allowPrimary(), equalTo(true)); + assertThat(AllocationCommands.readFrom(in), equalTo(commands)); } public void testXContent() throws Exception { @@ -802,32 +768,19 @@ public void testXContent() throws Exception { // move two tokens, parser expected to be "on" `commands` field parser.nextToken(); parser.nextToken(); - AllocationCommands sCommands = AllocationCommands.fromXContent(parser); - - assertThat(sCommands.commands().size(), equalTo(5)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(1)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).index(), equalTo("test")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(2)); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).index(), equalTo("test")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).node(), equalTo("node1")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(2)); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).index(), equalTo("test")); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node1")); - - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).shardId(), equalTo(3)); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).index(), equalTo("test")); - 
assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).fromNode(), equalTo("node2")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).toNode(), equalTo("node3")); - - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).shardId(), equalTo(4)); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).index(), equalTo("test")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).node(), equalTo("node5")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).allowPrimary(), equalTo(true)); + + assertThat( + AllocationCommands.fromXContent(parser), + equalTo( + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), + new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), + new AllocateReplicaAllocationCommand("test", 2, "node1"), + new MoveAllocationCommand("test", 3, "node2", "node3"), + new CancelAllocationCommand("test", 4, "node5", true) + ) + ) + ); } @Override @@ -837,10 +790,7 @@ protected NamedXContentRegistry xContentRegistry() { public void testMoveShardToNonDataNode() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); logger.info("creating an index with 1 shard, no replica"); @@ -910,10 +860,7 @@ public void testMoveShardToNonDataNode() { public void testMoveShardFromNonDataNode() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); 
logger.info("creating an index with 1 shard, no replica"); @@ -985,7 +932,6 @@ public void testConflictingCommandsInSingleRequest() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); @@ -1091,8 +1037,4 @@ public void testConflictingCommandsInSingleRequest() { ); }).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead")); } - - private static String randomShardsAllocator() { - return randomFrom(BALANCED_ALLOCATOR, DESIRED_BALANCE_ALLOCATOR); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 6c294502de5cc..18a611e39b5a2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -57,10 +57,6 @@ import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; public abstract class ESAllocationTestCase extends ESTestCase { - private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings( - Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS - ); public static final SnapshotsInfoService SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES = () -> new SnapshotShardSizeInfo(Map.of()) { @Override From 4dd3b9a44ddcc8cf0d062f7c9958670deadf2ff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Tue, 31 Jan 2023 11:59:20 +0100 Subject: [PATCH 30/38] Link to the time-units doc in S3 repository docs instead of explaining it in words (#93351) --- .../snapshot-restore/repository-s3.asciidoc | 13 ++++++------- 1 file changed, 6 
insertions(+), 7 deletions(-) diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 4ead755c409e5..de72511010b51 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -143,11 +143,9 @@ settings belong in the `elasticsearch.yml` file. `read_timeout`:: - The maximum time {es} will wait to receive the next byte of data over an established, - open connection to the repository before it closes the connection. The value should - specify the unit. - For example, a value of `5s` specifies a 5 second timeout. The default value - is 50 seconds. + (<>) The maximum time {es} will wait to receive the next byte + of data over an established, open connection to the repository before it closes the + connection. The default value is 50 seconds. `max_retries`:: @@ -285,7 +283,7 @@ multiple deployments may share the same bucket. `chunk_size`:: - Big files can be broken down into chunks during snapshotting if needed. + (<>) Big files can be broken down into chunks during snapshotting if needed. Specify the chunk size as a value and unit, for example: `1TB`, `1GB`, `10MB`. Defaults to the maximum size of a blob in the S3 which is `5TB`. @@ -304,7 +302,8 @@ include::repository-shared-settings.asciidoc[] `buffer_size`:: - Minimum threshold below which the chunk is uploaded using a single request. + (<>) Minimum threshold below which the chunk is + uploaded using a single request. 
Beyond this threshold, the S3 repository will use the https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] to split the chunk into several parts, each of From 4fb06b2f35fa27b871616d04a32fd2a043c36570 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 31 Jan 2023 11:24:32 +0000 Subject: [PATCH 31/38] Migrate misc packages to TransportVersion (#93272) --- .../AutoDateHistogramAggregationBuilder.java | 5 ++-- .../histogram/InternalAutoDateHistogram.java | 6 ++--- .../metric/MatrixStatsAggregationBuilder.java | 5 ++-- .../ingest/common/GrokProcessorGetAction.java | 6 ++--- .../stats/GeoIpDownloaderStatsAction.java | 6 ++--- .../mustache/MultiSearchTemplateResponse.java | 6 ++--- .../percolator/PercolateQueryBuilder.java | 8 +++--- .../upgrades/DesiredNodesUpgradeIT.java | 4 +-- .../main/java/org/elasticsearch/Build.java | 4 +-- .../TransportUpdateDesiredNodesAction.java | 2 +- .../UpdateDesiredNodesRequest.java | 3 +-- .../cluster/node/info/PluginsAndModules.java | 4 +-- .../admin/indices/rollover/Condition.java | 4 +-- .../MaxPrimaryShardDocsCondition.java | 6 ++--- .../indices/rollover/MinAgeCondition.java | 6 ++--- .../indices/rollover/MinDocsCondition.java | 6 ++--- .../MinPrimaryShardDocsCondition.java | 6 ++--- .../MinPrimaryShardSizeCondition.java | 6 ++--- .../indices/rollover/MinSizeCondition.java | 6 ++--- .../indices/rollover/RolloverRequest.java | 2 +- .../action/bulk/BulkItemResponse.java | 3 +-- .../action/explain/ExplainRequest.java | 3 +-- .../action/explain/ExplainResponse.java | 3 +-- .../coordination/JoinValidationService.java | 2 +- .../PublicationTransportHandler.java | 25 +++++++++--------- .../cluster/metadata/DesiredNode.java | 9 ++++--- .../cluster/metadata/Metadata.java | 2 +- .../cluster/metadata/RepositoryMetadata.java | 4 +-- .../cluster/node/DiscoveryNode.java | 7 ++--- .../cluster/routing/ShardRouting.java | 2 +- .../common/compress/CompressedXContent.java | 6 ++--- 
.../common/document/DocumentField.java | 10 +++---- .../common/io/stream/DelayableWriteable.java | 26 +++++++++---------- .../io/stream/RecyclerBytesStreamOutput.java | 2 +- .../common/io/stream/StreamInput.java | 4 +-- .../common/io/stream/StreamOutput.java | 4 +-- .../elasticsearch/common/unit/Processors.java | 16 ++++++------ .../common/xcontent/XContentHelper.java | 4 +-- .../discovery/DiscoveryStats.java | 10 +++---- .../gateway/LocalAllocateDangledIndices.java | 5 ++-- ...ransportNodesListGatewayStartedShards.java | 10 +++---- .../health/metadata/HealthMetadata.java | 9 +++---- .../index/translog/Translog.java | 10 ++++--- .../recovery/RecoverySnapshotFileRequest.java | 4 +-- .../elasticsearch/monitor/jvm/JvmInfo.java | 6 ++--- .../org/elasticsearch/monitor/os/OsInfo.java | 8 +++--- .../org/elasticsearch/monitor/os/OsStats.java | 6 ++--- .../plugins/PluginDescriptor.java | 23 ++++++++-------- .../plugins/PluginRuntimeInfo.java | 8 +++--- .../script/ScriptContextStats.java | 8 +++--- .../elasticsearch/script/ScriptException.java | 6 ++--- .../org/elasticsearch/script/ScriptStats.java | 6 ++--- .../org/elasticsearch/script/TimeSeries.java | 6 ++--- .../tasks/TaskCancellationService.java | 6 ++--- .../transport/ActionTransportException.java | 6 ++--- .../transport/BytesTransportRequest.java | 10 +++---- .../transport/ConnectTransportException.java | 6 ++--- .../transport/ProxyConnectionStrategy.java | 5 ++-- .../transport/RemoteConnectionInfo.java | 6 ++--- .../transport/TransportStats.java | 5 ++-- .../action/OriginalIndicesTests.java | 10 +++---- .../GetStoredScriptRequestTests.java | 6 ++--- .../TransportResolveIndexActionTests.java | 12 ++++++--- ...TransportFieldCapabilitiesActionTests.java | 9 ++++--- .../action/get/MultiGetShardRequestTests.java | 14 +++++----- .../PublicationTransportHandlerTests.java | 2 +- .../cluster/metadata/DesiredNodeTests.java | 13 +++++----- .../io/stream/DelayableWriteableTests.java | 25 ++++++++++-------- 
.../reindex/BulkByScrollResponseTests.java | 4 +-- .../reindex/BulkByScrollTaskStatusTests.java | 6 ++--- .../license/GetFeatureUsageResponse.java | 10 +++---- .../protocol/xpack/XPackInfoRequest.java | 10 +++---- .../protocol/xpack/XPackInfoResponse.java | 10 +++---- .../xpack/graph/GraphExploreRequest.java | 6 ++--- .../ilm/IndexLifecycleExplainResponse.java | 3 +-- .../ilm/IndexLifecycleFeatureSetUsage.java | 5 ++-- .../xpack/core/ilm/RolloverAction.java | 5 ++-- .../xpack/core/indexing/IndexerJobStats.java | 3 +-- .../pivot/DateHistogramGroupSource.java | 6 ++--- .../action/MonitoringBulkRequestTests.java | 2 +- .../blobstore/testkit/BlobAnalyzeAction.java | 8 +++--- .../testkit/RepositoryAnalyzeAction.java | 10 ++++--- 82 files changed, 298 insertions(+), 283 deletions(-) diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 3c1742c2af77a..cac35ce644bf7 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -9,7 +9,6 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -123,7 +122,7 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { 
minimumIntervalExpression = in.readOptionalString(); } } @@ -131,7 +130,7 @@ public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { out.writeOptionalString(minimumIntervalExpression); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 4860bedaee61a..c91a6bed8a716 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -226,7 +226,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); buckets = in.readList(stream -> new Bucket(stream, format)); this.targetBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { bucketInnerInterval = in.readVLong(); } else { bucketInnerInterval = 1; // Calculated on merge. 
@@ -239,7 +239,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeList(buckets); out.writeVInt(targetBuckets); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeVLong(bucketInnerInterval); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java index ee45a32dd8501..d8edb19c2782b 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java @@ -8,7 +8,6 @@ package org.elasticsearch.aggregations.metric; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.MultiValueMode; @@ -56,14 +55,14 @@ public boolean supportsSampling() { */ public MatrixStatsAggregationBuilder(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { multiValueMode = MultiValueMode.readMultiValueModeFrom(in); } } @Override protected void innerWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { multiValueMode.writeTo(out); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index ad00956d2dde7..a13b7d21bc115 100644 --- 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -57,7 +57,7 @@ public Request(boolean sorted, String ecsCompatibility) { Request(StreamInput in) throws IOException { super(in); this.sorted = in.readBoolean(); - this.ecsCompatibility = in.getVersion().onOrAfter(Version.V_8_0_0) + this.ecsCompatibility = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readString() : GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE; } @@ -71,7 +71,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(sorted); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(ecsCompatibility); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java index a160dfeec9b4a..228758e886c69 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import 
org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -166,7 +166,7 @@ protected NodeResponse(StreamInput in) throws IOException { stats = in.readBoolean() ? new GeoIpDownloaderStats(in) : null; databases = in.readSet(StreamInput::readString); filesInTemp = in.readSet(StreamInput::readString); - configDatabases = in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readSet(StreamInput::readString) : null; + configDatabases = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readSet(StreamInput::readString) : null; } protected NodeResponse( @@ -208,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(databases, StreamOutput::writeString); out.writeCollection(filesInTemp, StreamOutput::writeString); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeCollection(configDatabases, StreamOutput::writeString); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 5aa962973b6f8..f426480155356 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; @@ -102,7 +102,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + 
if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { tookInMillis = in.readVLong(); } else { tookInMillis = -1L; @@ -136,7 +136,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index ec924eef6184d..f2d06e2b72d47 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -217,12 +217,12 @@ protected PercolateQueryBuilder(String field, Supplier documentS super(in); field = in.readString(); name = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String documentType = in.readOptionalString(); assert documentType == null; } indexedDocumentIndex = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String indexedDocumentType = in.readOptionalString(); assert indexedDocumentType == null; } @@ -259,12 +259,12 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeString(field); out.writeOptionalString(name); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null documentType values out.writeOptionalString(null); } out.writeOptionalString(indexedDocumentIndex); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // 
In 7x, typeless percolate queries are represented by null indexedDocumentType values out.writeOptionalString(null); } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0c42429f71c5..2cbfe030be09c 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -43,9 +43,9 @@ public void testUpgradeDesiredNodes() throws Exception { return; } - if (UPGRADE_FROM_VERSION.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (UPGRADE_FROM_VERSION.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + } else if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9cddfd504a0e5..7279e0c4aa4c1 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -151,7 +151,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final Type type; // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - if (in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now in.readString(); } @@ -167,7 +167,7 @@ public static Build 
readBuild(StreamInput in) throws IOException { } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now out.writeString("default"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index 19e5762b0a72f..e1fcf41de5ebc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -95,7 +95,7 @@ protected void masterOperation( @Override protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionListener listener) { final var minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); - if (request.isCompatibleWithVersion(minNodeVersion) == false) { + if (request.isCompatibleWithVersion(minNodeVersion.transportVersion) == false) { listener.onFailure( new IllegalArgumentException( "Unable to use processor ranges, floating-point (with greater precision) processors " diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index fa95c4a7df69e..825db3c31a998 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.TransportVersion; -import 
org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -99,7 +98,7 @@ public boolean isDryRun() { return dryRun; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java index 974e90b11d8dd..e6a91b152cad5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.ReportingService; @@ -41,7 +41,7 @@ public PluginsAndModules(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeList(plugins); } else { out.writeList(plugins.stream().map(PluginRuntimeInfo::descriptor).toList()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index b61e73bbfa26b..ba7d6b03043c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentFragment; @@ -41,7 +41,7 @@ protected Condition(String name, Type type) { * Checks if this condition is available in a specific version. * This makes sure BWC when introducing a new condition which is not recognized by older versions. */ - boolean includedInVersion(Version version) { + boolean includedInVersion(TransportVersion version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java index c27b4a7b7e739..678ec96c217ca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MaxPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_2_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_2_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java index ddcfadd53dd74..98958d3b015c7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -64,7 +64,7 @@ public static MinAgeCondition fromXContent(XContentParser parser) throws IOExcep } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java index 9a4fffc17018f..8c6274cfadb81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinDocsCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return 
version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java index e1aee305742f3..6aaea57e5b55b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java index 5ec8d26d9672a..d7149e2a91be4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static 
MinPrimaryShardSizeCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java index 82cf3c0daf301..52db7ff90cf26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static MinSizeCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 9916acbef125f..fd773a9e19b58 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -212,7 +212,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(newIndexName); out.writeBoolean(dryRun); out.writeCollection( - conditions.values().stream().filter(c -> 
c.includedInVersion(out.getVersion())).toList(), + conditions.values().stream().filter(c -> c.includedInVersion(out.getTransportVersion())).toList(), StreamOutput::writeNamedWriteable ); createIndexRequest.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 1f140be7522ad..b5894d322b90e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -254,7 +253,7 @@ public Failure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index 43a00accdb998..685eb0b8a1995 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.explain; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; @@ -161,7 
+160,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 880ed44db4460..97c56069fa762 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -119,7 +118,7 @@ public RestStatus status() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index f2dde93f16e84..ecadc771ebdaf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -308,7 +308,7 @@ protected void doRun() throws Exception { transportService.sendRequest( discoveryNode, JOIN_VALIDATE_ACTION_NAME, - new BytesTransportRequest(bytes, discoveryNode.getVersion()), + new BytesTransportRequest(bytes, 
discoveryNode.getVersion().transportVersion), REQUEST_OPTIONS, new CleanableResponseHandler<>( listener, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 526eac3f2687d..4d6b4ce1edd07 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -85,7 +86,7 @@ public class PublicationTransportHandler { TransportRequestOptions.Type.STATE ); - public static final Version INCLUDES_LAST_COMMITTED_DATA_VERSION = Version.V_8_6_0; + public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersion.V_8_6_0; private final SerializationStatsTracker serializationStatsTracker = new SerializationStatsTracker(); @@ -125,7 +126,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); } in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); - in.setVersion(request.version()); + in.setTransportVersion(request.version()); // If true we received full cluster state - otherwise diffs if (in.readBoolean()) { final ClusterState incomingState; @@ -226,7 +227,7 @@ public PublicationContext newPublicationContext(ClusterStatePublicationEvent clu } private ReleasableBytesReference serializeFullClusterState(ClusterState clusterState, DiscoveryNode node) { - final Version nodeVersion = node.getVersion(); + final TransportVersion 
serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -236,7 +237,7 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(true); clusterState.writeTo(stream); uncompressedBytes = stream.position(); @@ -246,9 +247,9 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedFullState(uncompressedBytes, result.length()); logger.trace( - "serialized full cluster state version [{}] for node version [{}] with size [{}]", + "serialized full cluster state version [{}] using transport version [{}] with size [{}]", clusterState.version(), - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -262,7 +263,7 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS private ReleasableBytesReference serializeDiffClusterState(ClusterState newState, Diff diff, DiscoveryNode node) { final long clusterStateVersion = newState.version(); - final Version nodeVersion = node.getVersion(); + final TransportVersion serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -272,10 +273,10 @@ private ReleasableBytesReference serializeDiffClusterState(ClusterState newState CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(false); diff.writeTo(stream); - if 
(nodeVersion.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) { + if (serializeVersion.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) { stream.writeBoolean(newState.metadata().clusterUUIDCommitted()); newState.getLastCommittedConfiguration().writeTo(stream); } @@ -286,9 +287,9 @@ private ReleasableBytesReference serializeDiffClusterState(ClusterState newState final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedDiff(uncompressedBytes, result.length()); logger.trace( - "serialized cluster state diff for version [{}] for node version [{}] with size [{}]", + "serialized cluster state diff for version [{}] using transport version [{}] with size [{}]", clusterStateVersion, - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -466,7 +467,7 @@ private void sendClusterState( transportService.sendChildRequest( destination, PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(bytes, destination.getVersion()), + new BytesTransportRequest(bytes, destination.getVersion().transportVersion), task, STATE_REQUEST_OPTIONS, new CleanableResponseHandler<>(listener, PublishWithJoinResponse::new, ThreadPool.Names.CLUSTER_COORDINATION, bytes::decRef) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index 32b060cc9682a..2c29df2e661c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -38,7 +39,7 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public final class DesiredNode implements 
Writeable, ToXContentObject, Comparable { - public static final Version RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; + public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; private static final ParseField SETTINGS_FIELD = new ParseField("settings"); private static final ParseField PROCESSORS_FIELD = new ParseField("processors"); @@ -173,7 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { final var settings = Settings.readSettingsFromStream(in); final Processors processors; final ProcessorsRange processorsRange; - if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) { + if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { processors = in.readOptionalWriteable(Processors::readFrom); processorsRange = in.readOptionalWriteable(ProcessorsRange::readFrom); } else { @@ -189,7 +190,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { settings.writeTo(out); - if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) { + if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { out.writeOptionalWriteable(processors); out.writeOptionalWriteable(processorsRange); } else { @@ -296,7 +297,7 @@ public Set getRoles() { return roles; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 06f4b29b681b3..4d19109738a88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1446,7 +1446,7 @@ private static class MetadataDiff implements Diff { private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersion.V_8_5_0; private static final TransportVersion NOOP_METADATA_DIFF_SAFE_VERSION = - PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION.transportVersion; + PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION; private final long version; private final String clusterUUID; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java index 7790e265220ae..66e339a474ed7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java @@ -129,7 +129,7 @@ public long pendingGeneration() { public RepositoryMetadata(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (in.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { uuid = in.readString(); } else { uuid = RepositoryData.MISSING_UUID; @@ -148,7 +148,7 @@ public RepositoryMetadata(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (out.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { out.writeString(uuid); } out.writeString(type); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 68bb027245861..4db38a81c5f3c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java 
+++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -63,7 +64,7 @@ public static boolean isStateless(final Settings settings) { } static final String COORDINATING_ONLY = "coordinating_only"; - public static final Version EXTERNAL_ID_VERSION = Version.V_8_3_0; + public static final TransportVersion EXTERNAL_ID_VERSION = TransportVersion.V_8_3_0; public static final Comparator DISCOVERY_NODE_COMPARATOR = Comparator.comparing(DiscoveryNode::getName) .thenComparing(DiscoveryNode::getId); @@ -411,7 +412,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } this.roles = Collections.unmodifiableSortedSet(roles); this.version = Version.readVersion(in); - if (in.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (in.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { this.externalId = readStringLiteral.read(in); } else { this.externalId = nodeName; @@ -444,7 +445,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeBoolean(role.canContainData()); }); Version.writeVersion(version, out); - if (out.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (out.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { out.writeString(externalId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 2e993cc120c18..37ae9784d9cfa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -393,7 +393,7 @@ public void writeToThin(StreamOutput out) throws IOException { role.writeTo(out); } else if (role != Role.DEFAULT) { throw new IllegalStateException( - Strings.format("cannot send role [%s] to 
node of version [%s]", role, out.getTransportVersion()) + Strings.format("cannot send role [%s] with transport version [%s]", role, out.getTransportVersion()) ); } } diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index b6b69a9d4d22a..2de39744e40a7 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,7 +9,7 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; @@ -209,7 +209,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -221,7 +221,7 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index ec6f378c4c07c..5828b485ce36d 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ 
b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.document; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -44,12 +44,12 @@ public class DocumentField implements Writeable, Iterable { public DocumentField(StreamInput in) throws IOException { name = in.readString(); values = in.readList(StreamInput::readGenericValue); - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { ignoredValues = in.readList(StreamInput::readGenericValue); } else { ignoredValues = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { lookupFields = in.readList(LookupField::new); } else { lookupFields = List.of(); @@ -114,10 +114,10 @@ public List getIgnoredValues() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeCollection(values, StreamOutput::writeGenericValue); - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeCollection(ignoredValues, StreamOutput::writeGenericValue); } - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeList(lookupFields); } else { if (lookupFields.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java index 5c637f27dfd19..8ec408c2b08c3 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java +++ 
b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.Releasable; @@ -49,12 +49,12 @@ public static DelayableWriteable referencing(T referenc * when {@link #expand()} is called. */ public static DelayableWriteable delayed(Writeable.Reader reader, StreamInput in) throws IOException { - return new Serialized<>(reader, in.getVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); + return new Serialized<>(reader, in.getTransportVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); } public static DelayableWriteable referencing(Writeable.Reader reader, StreamInput in) throws IOException { try (ReleasableBytesReference serialized = in.readReleasableBytesReference()) { - return new Referencing<>(deserialize(reader, in.getVersion(), in.namedWriteableRegistry(), serialized)); + return new Referencing<>(deserialize(reader, in.getTransportVersion(), in.namedWriteableRegistry(), serialized)); } } @@ -103,12 +103,12 @@ public T expand() { public Serialized asSerialized(Reader reader, NamedWriteableRegistry registry) { BytesStreamOutput buffer; try { - buffer = writeToBuffer(Version.CURRENT); + buffer = writeToBuffer(TransportVersion.CURRENT); } catch (IOException e) { throw new RuntimeException("unexpected error writing writeable to buffer", e); } // TODO: this path is currently not used in production code, if it ever is this should start using pooled buffers - return new Serialized<>(reader, Version.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); + return new Serialized<>(reader, TransportVersion.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); } @Override @@ -121,9 +121,9 @@ public long 
getSerializedSize() { return DelayableWriteable.getSerializedSize(reference); } - private BytesStreamOutput writeToBuffer(Version version) throws IOException { + private BytesStreamOutput writeToBuffer(TransportVersion version) throws IOException { try (BytesStreamOutput buffer = new BytesStreamOutput()) { - buffer.setVersion(version); + buffer.setTransportVersion(version); reference.writeTo(buffer); return buffer; } @@ -141,13 +141,13 @@ public void close() { */ public static class Serialized extends DelayableWriteable { private final Writeable.Reader reader; - private final Version serializedAtVersion; + private final TransportVersion serializedAtVersion; private final NamedWriteableRegistry registry; private final ReleasableBytesReference serialized; private Serialized( Writeable.Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, ReleasableBytesReference serialized ) { @@ -159,7 +159,7 @@ private Serialized( @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion() == serializedAtVersion) { + if (out.getTransportVersion() == serializedAtVersion) { /* * If the version *does* line up we can just copy the bytes * which is good because this is how shard request caching @@ -214,7 +214,7 @@ public void close() { */ public static long getSerializedSize(Writeable ref) { try (CountingStreamOutput out = new CountingStreamOutput()) { - out.setVersion(Version.CURRENT); + out.setTransportVersion(TransportVersion.CURRENT); ref.writeTo(out); return out.size; } catch (IOException exc) { @@ -224,7 +224,7 @@ public static long getSerializedSize(Writeable ref) { private static T deserialize( Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, BytesReference serialized ) throws IOException { @@ -233,7 +233,7 @@ private static T deserialize( ? 
serialized.streamInput() : new NamedWriteableAwareStreamInput(serialized.streamInput(), registry) ) { - in.setVersion(serializedAtVersion); + in.setTransportVersion(serializedAtVersion); return reader.read(in); } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java index 7c372f6b52bff..4ebebdbd8e9bb 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java @@ -127,7 +127,7 @@ public void writeWithSizePrefix(Writeable writeable) throws IOException { // manipulation of the offsets on the pages after writing to tmp. This will require adjustments to the places in this class // that make assumptions about the page size try (RecyclerBytesStreamOutput tmp = new RecyclerBytesStreamOutput(recycler)) { - tmp.setVersion(getVersion()); + tmp.setTransportVersion(getTransportVersion()); writeable.writeTo(tmp); int size = tmp.size(); writeVInt(size); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index cab1e3611295b..faea2ad8bc864 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -769,10 +769,10 @@ public Object readGenericValue() throws IOException { case 6 -> readByteArray(); case 7 -> readArrayList(); case 8 -> readArray(); - case 9 -> getVersion().onOrAfter(Version.V_8_7_0) + case 9 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) ? 
readOrderedMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readOrderedMap(StreamInput::readString, StreamInput::readGenericValue); - case 10 -> getVersion().onOrAfter(Version.V_8_7_0) + case 10 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) ? readMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readMap(StreamInput::readString, StreamInput::readGenericValue); case 11 -> readByte(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index f69a229c901f4..19b792f332541 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -591,7 +591,7 @@ public void writeMapWithConsistentOrder(@Nullable Map .iterator(); while (iterator.hasNext()) { Map.Entry next = iterator.next(); - if (this.getVersion().onOrAfter(Version.V_8_7_0)) { + if (this.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { this.writeGenericValue(next.getKey()); } else { this.writeString(next.getKey()); @@ -722,7 +722,7 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep } else { o.writeByte((byte) 10); } - if (o.getVersion().onOrAfter(Version.V_8_7_0)) { + if (o.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { final Map map = (Map) v; o.writeMap(map, StreamOutput::writeGenericValue, StreamOutput::writeGenericValue); } else { diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index b95bdd3615f3c..89db778266eae 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -29,8 +29,8 @@ public class Processors implements Writeable, Comparable, ToXContent public static final Processors ZERO = new Processors(0.0); public static final Processors MAX_PROCESSORS = new Processors(Double.MAX_VALUE); - public static final Version FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; - public static final Version DOUBLE_PROCESSORS_SUPPORT_VERSION = Version.V_8_5_0; + public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_5_0; static final int NUMBER_OF_DECIMAL_PLACES = 5; private static final double MIN_REPRESENTABLE_PROCESSORS = 1E-5; @@ -63,9 +63,9 @@ public static Processors of(Double count) { public static Processors readFrom(StreamInput in) throws IOException { final double processorCount; - if (in.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (in.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readInt(); - } else if (in.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (in.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readFloat(); } else { processorCount = in.readDouble(); @@ -75,10 +75,10 @@ public static Processors readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (out.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { assert hasDecimals() == false; out.writeInt((int) count); - } else if (out.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (out.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { 
out.writeFloat((float) count); } else { out.writeDouble(count); @@ -143,7 +143,7 @@ private boolean hasDecimals() { return ((int) count) != Math.ceil(count); } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index a71f4fe70f324..fda5055e5585a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -594,7 +594,7 @@ public static BytesReference childBytes(XContentParser parser) throws IOExceptio * @param xContentType an instance to serialize */ public static void writeTo(StreamOutput out, XContentType xContentType) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // when sending an enumeration to { public Request(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -222,7 +222,7 @@ public String getCustomDataPath() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeString(customDataPath); } } @@ -262,7 +262,7 @@ public static class NodeRequest extends TransportRequest { public NodeRequest(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -278,7 +278,7 @@ public NodeRequest(Request request) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { assert customDataPath != null; out.writeString(customDataPath); } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 4a20913c094c7..859a4fc2a8c15 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -9,7 +9,6 @@ package org.elasticsearch.health.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -117,7 +116,7 @@ public record Disk( ) implements ToXContentFragment, Writeable { public static final String TYPE = "disk"; - public static Version VERSION_SUPPORTING_HEADROOM_FIELDS = Version.V_8_5_0; + public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersion.V_8_5_0; private static final ParseField HIGH_WATERMARK_FIELD = new ParseField("high_watermark"); private static final ParseField HIGH_MAX_HEADROOM_FIELD = new ParseField("high_max_headroom"); @@ -140,10 +139,10 @@ static Disk 
readFrom(StreamInput in) throws IOException { FROZEN_FLOOD_STAGE_WATERMARK_FIELD.getPreferredName() ); ByteSizeValue frozenFloodStageMaxHeadroom = ByteSizeValue.readFrom(in); - ByteSizeValue highMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue highMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; - ByteSizeValue floodStageMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue floodStageMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; return new Disk( @@ -162,7 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(describeFloodStageWatermark()); out.writeString(describeFrozenFloodStageWatermark()); frozenFloodStageMaxHeadroom.writeTo(out); - if (out.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { + if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { highMaxHeadroom.writeTo(out); floodStageMaxHeadroom.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index daf29a95c15a6..8eba045ee3cc8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.translog; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -1240,7 +1240,9 @@ public long version() { } private void write(final StreamOutput out) throws IOException { - final int format = 
out.getVersion().onOrAfter(Version.V_8_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; + final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + ? SERIALIZATION_FORMAT + : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); out.writeString(id); if (format < FORMAT_NO_DOC_TYPE) { @@ -1401,7 +1403,9 @@ public BytesReference source() { } private void write(final StreamOutput out) throws IOException { - final int format = out.getVersion().onOrAfter(Version.V_8_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; + final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + ? SERIALIZATION_FORMAT + : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); if (format < FORMAT_NO_DOC_TYPE) { out.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java index 2f2558eab3d82..47cf4ef4824ee 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java @@ -50,8 +50,8 @@ public RecoverySnapshotFileRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().onOrAfter(RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION) - : "Unexpected serialization version " + out.getVersion(); + assert out.getTransportVersion().onOrAfter(RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION.transportVersion) + : "Unexpected serialization version " + out.getTransportVersion(); super.writeTo(out); out.writeLong(recoveryId); shardId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index c4b450946adc0..f928f83531aa0 100644 --- 
a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -9,7 +9,7 @@ package org.elasticsearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -268,7 +268,7 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // Before 8.0 the no-jdk distributions could have bundledJdk false, this is always true now. in.readBoolean(); } @@ -302,7 +302,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getVersion().before(Version.V_8_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_3_0)) { out.writeBoolean(true); } out.writeOptionalBoolean(usingBundledJdk); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index a7be10d87308f..d956ee4f6852c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.monitor.os; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.Processors; @@ -19,7 +19,7 @@ import java.io.IOException; public class OsInfo implements ReportingService.Info { - private static final Version DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = 
Version.V_8_5_0; + private static final TransportVersion DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = TransportVersion.V_8_5_0; private final long refreshInterval; private final int availableProcessors; @@ -50,7 +50,7 @@ public OsInfo( public OsInfo(StreamInput in) throws IOException { this.refreshInterval = in.readLong(); this.availableProcessors = in.readInt(); - if (in.getVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { + if (in.getTransportVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { this.allocatedProcessors = Processors.readFrom(in); } else { this.allocatedProcessors = Processors.of((double) in.readInt()); @@ -65,7 +65,7 @@ public OsInfo(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeLong(refreshInterval); out.writeInt(availableProcessors); - if (out.getVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { + if (out.getTransportVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { allocatedProcessors.writeTo(out); } else { out.writeInt(getAllocatedProcessors()); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 1d56d2f1387fc..dab3eb100e9fb 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -276,7 +276,7 @@ public Mem(StreamInput in) throws IOException { total = 0; } this.total = total; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { long adjustedTotal = in.readLong(); assert adjustedTotal >= 0 : "expected adjusted total memory to be positive, got: " + adjustedTotal; if (adjustedTotal < 0) { @@ -299,7 +299,7 @@ public Mem(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(total); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeLong(adjustedTotal); } out.writeLong(free); diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index df8cbed44e6f9..dd56e18957318 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -45,9 +46,9 @@ public class PluginDescriptor implements Writeable, ToXContentObject { public static final String ES_PLUGIN_POLICY = "plugin-security.policy"; - private static final Version LICENSED_PLUGINS_SUPPORT = Version.V_7_11_0; - private static final Version MODULE_NAME_SUPPORT = Version.V_8_3_0; - private static final Version BOOTSTRAP_SUPPORT_REMOVED = Version.V_8_4_0; + private static final TransportVersion LICENSED_PLUGINS_SUPPORT = TransportVersion.V_7_11_0; + private static final TransportVersion MODULE_NAME_SUPPORT = TransportVersion.V_8_3_0; + private static final TransportVersion BOOTSTRAP_SUPPORT_REMOVED = TransportVersion.V_8_4_0; private final String name; private final String description; @@ -118,7 +119,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { elasticsearchVersion = Version.readVersion(in); javaVersion 
= in.readString(); this.classname = in.readString(); - if (in.getVersion().onOrAfter(MODULE_NAME_SUPPORT)) { + if (in.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { this.moduleName = in.readOptionalString(); } else { this.moduleName = null; @@ -126,8 +127,8 @@ public PluginDescriptor(final StreamInput in) throws IOException { extendedPlugins = in.readStringList(); hasNativeController = in.readBoolean(); - if (in.getVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { - if (in.getVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { + if (in.getTransportVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { + if (in.getTransportVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { in.readString(); // plugin type in.readOptionalString(); // java opts } @@ -136,7 +137,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { isLicensed = false; } - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { isModular = in.readBoolean(); isStable = in.readBoolean(); } else { @@ -153,20 +154,20 @@ public void writeTo(final StreamOutput out) throws IOException { Version.writeVersion(elasticsearchVersion, out); out.writeString(javaVersion); out.writeString(classname); - if (out.getVersion().onOrAfter(MODULE_NAME_SUPPORT)) { + if (out.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { out.writeOptionalString(moduleName); } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); - if (out.getVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { - if (out.getVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { + if (out.getTransportVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { + if (out.getTransportVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { out.writeString("ISOLATED"); out.writeOptionalString(null); } out.writeBoolean(isLicensed); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeBoolean(isModular); 
out.writeBoolean(isStable); } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java index bf6c077b4de4a..f58f14bcd7a77 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -40,7 +40,7 @@ public PluginRuntimeInfo(StreamInput in) throws IOException { } private static Boolean readIsOfficial(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { return in.readBoolean(); } else { return null; @@ -48,7 +48,7 @@ private static Boolean readIsOfficial(StreamInput in) throws IOException { } private static PluginApiInfo readApiInfo(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { return in.readOptionalWriteable(PluginApiInfo::new); } else { return null; @@ -72,7 +72,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { descriptor.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeBoolean(isOfficial); out.writeOptionalWriteable(pluginApiInfo); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java index acde189431c44..6e5eba4834ac4 100644 --- 
a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -45,10 +45,10 @@ public ScriptContextStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); compilationLimitTriggered = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); - } else if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { compilationsHistory = new TimeSeries(in).withTotal(compilations); cacheEvictionsHistory = new TimeSeries(in).withTotal(cacheEvictions); } else { @@ -63,7 +63,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); out.writeVLong(compilationLimitTriggered); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptException.java b/server/src/main/java/org/elasticsearch/script/ScriptException.java index 05a291b6562bc..b066ddb24d1ee 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptException.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptException.java @@ -9,7 +9,7 @@ package org.elasticsearch.script; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,7 +78,7 @@ public ScriptException(StreamInput in) throws IOException { scriptStack = Arrays.asList(in.readStringArray()); script = in.readString(); lang = in.readString(); - if (in.getVersion().onOrAfter(Version.V_7_7_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) && in.readBoolean()) { pos = new Position(in); } else { pos = null; @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(scriptStack.toArray(new String[0])); out.writeString(script); out.writeString(lang); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { if (pos == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/script/ScriptStats.java b/server/src/main/java/org/elasticsearch/script/ScriptStats.java index 0b161ff7bca87..76f512d627817 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -76,7 +76,7 @@ public ScriptStats(ScriptContextStats context) { } public ScriptStats(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); compilations = compilationsHistory.total; @@ -93,7 +93,7 @@ public ScriptStats(StreamInput in) throws IOException { 
@Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } else { diff --git a/server/src/main/java/org/elasticsearch/script/TimeSeries.java b/server/src/main/java/org/elasticsearch/script/TimeSeries.java index 8399a65a57e08..0311d3322e456 100644 --- a/server/src/main/java/org/elasticsearch/script/TimeSeries.java +++ b/server/src/main/java/org/elasticsearch/script/TimeSeries.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -49,7 +49,7 @@ public TimeSeries(StreamInput in) throws IOException { fiveMinutes = in.readVLong(); fifteenMinutes = in.readVLong(); twentyFourHours = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { total = in.readVLong(); } else { total = 0; @@ -70,7 +70,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(fiveMinutes); out.writeVLong(fifteenMinutes); out.writeVLong(twentyFourHours); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeVLong(total); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java index 5d6fda57b19f0..122a25a613917 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java @@ -12,7 +12,7 @@ import org.apache.logging.log4j.Logger; import 
org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResultDeduplicator; import org.elasticsearch.action.StepListener; @@ -294,7 +294,7 @@ private BanParentTaskRequest(StreamInput in) throws IOException { parentTaskId = TaskId.readFromStream(in); ban = in.readBoolean(); reason = ban ? in.readString() : null; - if (in.getVersion().onOrAfter(Version.V_7_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { waitForCompletion = in.readBoolean(); } else { waitForCompletion = false; @@ -309,7 +309,7 @@ public void writeTo(StreamOutput out) throws IOException { if (ban) { out.writeString(reason); } - if (out.getVersion().onOrAfter(Version.V_7_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { out.writeBoolean(waitForCompletion); } } diff --git a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java index 8f6f0fd477b1a..444a34e4bcc3e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkAddress; @@ -24,7 +24,7 @@ public class ActionTransportException extends TransportException { public ActionTransportException(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_1_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) { 
in.readOptionalWriteable(TransportAddress::new); in.readOptionalString(); } @@ -45,7 +45,7 @@ public ActionTransportException(String name, InetSocketAddress address, String a @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_1_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) { out.writeMissingWriteable(TransportAddress.class); out.writeMissingString(); // action } diff --git a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java index 1f05adbac0336..51aca540f7aa6 100644 --- a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -24,20 +24,20 @@ public class BytesTransportRequest extends TransportRequest implements RefCounted { final ReleasableBytesReference bytes; - private final Version version; + private final TransportVersion version; public BytesTransportRequest(StreamInput in) throws IOException { super(in); bytes = in.readReleasableBytesReference(); - version = in.getVersion(); + version = in.getTransportVersion(); } - public BytesTransportRequest(ReleasableBytesReference bytes, Version version) { + public BytesTransportRequest(ReleasableBytesReference bytes, TransportVersion version) { this.bytes = bytes; this.version = version; } - public Version version() { + public TransportVersion version() { return this.version; } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java 
b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index eddd6d6f108ba..e6e566e26b03b 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,7 +35,7 @@ public ConnectTransportException(DiscoveryNode node, String msg, String action, public ConnectTransportException(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_1_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) { in.readOptionalWriteable(DiscoveryNode::new); } } @@ -43,7 +43,7 @@ public ConnectTransportException(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_1_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) { out.writeMissingWriteable(DiscoveryNode.class); } } diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index d49e760ab85ab..a410f0e912f8e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; @@ -316,7 +317,7 @@ public ProxyModeInfo(String address, String serverName, int maxSocketConnections private 
ProxyModeInfo(StreamInput input) throws IOException { address = input.readString(); - if (input.getVersion().onOrAfter(Version.V_7_7_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { serverName = input.readString(); } else { serverName = null; @@ -337,7 +338,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(address); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { out.writeString(serverName); } out.writeVInt(maxSocketConnections); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index fbbd2483e6939..b21455829fb05 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -40,7 +40,7 @@ public RemoteConnectionInfo(String clusterAlias, ModeInfo modeInfo, TimeValue in } public RemoteConnectionInfo(StreamInput input) throws IOException { - if (input.getVersion().onOrAfter(Version.V_7_6_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { RemoteConnectionStrategy.ConnectionStrategy mode = input.readEnum(RemoteConnectionStrategy.ConnectionStrategy.class); modeInfo = mode.getReader().read(input); initialConnectionTimeout = input.readTimeValue(); @@ -79,7 +79,7 @@ public boolean isSkipUnavailable() { @Override public void writeTo(StreamOutput out) throws IOException { - if 
(out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeEnum(modeInfo.modeType()); modeInfo.writeTo(out); out.writeTimeValue(initialConnectionTimeout); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index fe0f1742f9eb6..694fb0628d145 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +60,7 @@ public TransportStats(StreamInput in) throws IOException { rxSize = in.readVLong(); txCount = in.readVLong(); txSize = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) && in.readBoolean()) { inboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { inboundHandlingTimeBucketFrequencies[i] = in.readVLong(); @@ -83,7 +84,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(rxSize); out.writeVLong(txCount); out.writeVLong(txSize); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { assert (inboundHandlingTimeBucketFrequencies.length > 0) == (outboundHandlingTimeBucketFrequencies.length > 0); out.writeBoolean(inboundHandlingTimeBucketFrequencies.length > 0); for (long handlingTimeBucketFrequency : inboundHandlingTimeBucketFrequencies) { diff --git a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java 
b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java index 5d87e3b7a4961..5b3fe6e904f13 100644 --- a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java +++ b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -16,7 +16,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomCompatibleVersion; import static org.hamcrest.CoreMatchers.equalTo; public class OriginalIndicesTests extends ESTestCase { @@ -34,18 +34,18 @@ public void testOriginalIndicesSerialization() throws IOException { OriginalIndices originalIndices = randomOriginalIndices(); BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomCompatibleVersion(random(), Version.CURRENT)); + out.setTransportVersion(randomCompatibleVersion(random(), TransportVersion.CURRENT)); OriginalIndices.writeOriginalIndices(originalIndices, out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); OriginalIndices originalIndices2 = OriginalIndices.readOriginalIndices(in); assertThat(originalIndices2.indices(), equalTo(originalIndices.indices())); // indices options are not equivalent when sent to an older version and re-read due // to the addition of hidden indices as expand to hidden indices is always true when // read from a prior version - if (out.getVersion().onOrAfter(Version.V_7_7_0) || originalIndices.indicesOptions().expandWildcardsHidden()) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) || 
originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); } else if (originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java index e5ecc8722146c..cf7bda6fc4c0b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java @@ -14,7 +14,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; public class GetStoredScriptRequestTests extends ESTestCase { @@ -22,11 +22,11 @@ public void testGetIndexedScriptRequestSerialization() throws IOException { GetStoredScriptRequest request = new GetStoredScriptRequest("id"); BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersion(random())); + out.setTransportVersion(randomVersion(random())); request.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); GetStoredScriptRequest request2 = new GetStoredScriptRequest(in); assertThat(request2.id(), equalTo(request.id())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java index 9b751702dfd6c..cf3fd1efb37f8 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.resolve; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; @@ -54,8 +55,10 @@ public void testCCSCompatibilityCheck() throws Exception { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.CURRENT)) { - throw new IllegalArgumentException("This request isn't serializable to nodes before " + Version.CURRENT); + if (out.getTransportVersion().before(TransportVersion.CURRENT)) { + throw new IllegalArgumentException( + "This request isn't serializable before transport version " + TransportVersion.CURRENT + ); } } }; @@ -81,7 +84,10 @@ public void writeTo(StreamOutput out) throws IOException { assertThat(ex.getMessage(), containsString("not compatible with version")); assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); - assertEquals("This request isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + assertEquals( + "This request isn't serializable before transport version " + TransportVersion.CURRENT, + ex.getCause().getMessage() + ); } finally { assertTrue(ESTestCase.terminate(threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java index 1e5f9762b1b5e..a390924b2e20b 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; @@ -56,8 +57,10 @@ public void testCCSCompatibilityCheck() throws Exception { fieldCapsRequest.indexFilter(new DummyQueryBuilder() { @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.CURRENT)) { - throw new IllegalArgumentException("This query isn't serializable to nodes before " + Version.CURRENT); + if (out.getTransportVersion().before(TransportVersion.CURRENT)) { + throw new IllegalArgumentException( + "This query isn't serializable before transport version " + TransportVersion.CURRENT + ); } } }); @@ -88,7 +91,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { containsString("[class org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest] is not compatible with version") ); assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); - assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + assertEquals("This query isn't serializable before transport version " + TransportVersion.CURRENT, ex.getCause().getMessage()); } finally { assertTrue(ESTestCase.terminate(threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java index 2546e4ef2a0ec..879e4c73dd3cb 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import 
org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,7 +18,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.equalTo; public class MultiGetShardRequestTests extends ESTestCase { @@ -26,15 +26,15 @@ public void testSerialization() throws IOException { MultiGetShardRequest multiGetShardRequest = createTestInstance(randomBoolean()); BytesStreamOutput out = new BytesStreamOutput(); - Version minVersion = Version.CURRENT.minimumCompatibilityVersion(); + TransportVersion minVersion = TransportVersion.CURRENT.minimumCompatibilityVersion(); if (multiGetShardRequest.isForceSyntheticSource()) { - minVersion = Version.V_8_4_0; + minVersion = TransportVersion.V_8_4_0; } - out.setVersion(randomVersionBetween(random(), minVersion, Version.CURRENT)); + out.setTransportVersion(randomVersionBetween(random(), minVersion, TransportVersion.CURRENT)); multiGetShardRequest.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); MultiGetShardRequest multiGetShardRequest2 = new MultiGetShardRequest(in); assertThat(multiGetShardRequest2.index(), equalTo(multiGetShardRequest.index())); assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference())); @@ -58,7 +58,7 @@ public void testSerialization() throws IOException { public void testForceSyntheticUnsupported() { MultiGetShardRequest request = createTestInstance(true); StreamOutput out = new BytesStreamOutput(); - out.setVersion(Version.V_8_3_0); + out.setTransportVersion(TransportVersion.V_8_3_0); Exception e = expectThrows(IllegalArgumentException.class, 
() -> request.writeTo(out)); assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java index ccd34e74c610f..a4b6b1f7fe871 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -137,7 +137,7 @@ private static boolean isDiff(BytesTransportRequest request, DiscoveryNode node) if (compressor != null) { in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); } - in.setVersion(node.getVersion()); + in.setTransportVersion(node.getVersion().transportVersion); return in.readBoolean() == false; } finally { IOUtils.close(in); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java index 5451da0c22351..826edafbae354 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.settings.Settings; @@ -201,8 +202,8 @@ public void testDesiredNodeIsCompatible() { ByteSizeValue.ofGb(1), Version.CURRENT ); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(false))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(false))); + 
assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } { @@ -213,14 +214,14 @@ public void testDesiredNodeIsCompatible() { ByteSizeValue.ofGb(1), Version.CURRENT ); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(false))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(false))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } { final var desiredNode = new DesiredNode(settings, 2.0f, ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(1), Version.CURRENT); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(true))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java index 5fd585cdb35b0..078a09bf44fe1 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java @@ -8,9 +8,9 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.TransportVersionUtils; import java.io.IOException; @@ -87,19 +87,19 @@ public int hashCode() { } private static class SneakOtherSideVersionOnWire implements Writeable { - private final Version version; + private final TransportVersion version; 
SneakOtherSideVersionOnWire() { - version = Version.CURRENT; + version = TransportVersion.CURRENT; } SneakOtherSideVersionOnWire(StreamInput in) throws IOException { - version = Version.readVersion(in); + version = TransportVersion.readVersion(in); } @Override public void writeTo(StreamOutput out) throws IOException { - Version.writeVersion(out.getVersion(), out); + TransportVersion.writeVersion(out.getTransportVersion(), out); } } @@ -144,7 +144,7 @@ public void testRoundTripFromDelayedFromOldVersionWithNamedWriteable() throws IO } public void testSerializesWithRemoteVersion() throws IOException { - Version remoteVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + TransportVersion remoteVersion = TransportVersionUtils.randomCompatibleVersion(random(), TransportVersion.CURRENT); DelayableWriteable original = DelayableWriteable.referencing(new SneakOtherSideVersionOnWire()); assertThat(roundTrip(original, SneakOtherSideVersionOnWire::new, remoteVersion).expand().version, equalTo(remoteVersion)); } @@ -157,14 +157,14 @@ public void testAsSerializedIsNoopOnSerialized() throws IOException { } private void roundTripTestCase(DelayableWriteable original, Writeable.Reader reader) throws IOException { - DelayableWriteable roundTripped = roundTrip(original, reader, Version.CURRENT); + DelayableWriteable roundTripped = roundTrip(original, reader, TransportVersion.CURRENT); assertThat(roundTripped.expand(), equalTo(original.expand())); } private DelayableWriteable roundTrip( DelayableWriteable original, Writeable.Reader reader, - Version version + TransportVersion version ) throws IOException { DelayableWriteable delayed = copyInstance( original, @@ -192,7 +192,10 @@ protected NamedWriteableRegistry writableRegistry() { return new NamedWriteableRegistry(singletonList(new NamedWriteableRegistry.Entry(Example.class, "example", Example::new))); } - private static Version randomOldVersion() { - return randomValueOtherThanMany(Version.CURRENT::before, () -> 
VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); + private static TransportVersion randomOldVersion() { + return randomValueOtherThanMany( + TransportVersion.CURRENT::before, + () -> TransportVersionUtils.randomCompatibleVersion(random(), TransportVersion.CURRENT) + ); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 04ebb70ec328c..244a66338ce94 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.client.internal.transport.NoNodeAvailableException; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -82,7 +82,7 @@ private List randomSearchFailures() { private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollResponse actual) { assertEquals(expected.getTook(), actual.getTook()); - BulkByScrollTaskStatusTests.assertTaskStatusEquals(Version.CURRENT, expected.getStatus(), actual.getStatus()); + BulkByScrollTaskStatusTests.assertTaskStatusEquals(TransportVersion.CURRENT, expected.getStatus(), actual.getStatus()); assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); for (int i = 0; i < expected.getBulkFailures().size(); i++) { Failure expectedFailure = expected.getBulkFailures().get(i); diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index b341e8c3503c5..1d2ec8ae082e8 100644 --- 
a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -10,7 +10,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -44,14 +44,14 @@ public void testBulkByTaskStatus() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); - assertTaskStatusEquals(out.getVersion(), status, tripped); + assertTaskStatusEquals(out.getTransportVersion(), status, tripped); } /** * Assert that two task statuses are equal after serialization. * @param version the version at which expected was serialized */ - public static void assertTaskStatusEquals(Version version, BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { + public static void assertTaskStatusEquals(TransportVersion version, BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { assertEquals(expected.getTotal(), actual.getTotal()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertEquals(expected.getCreated(), actual.getCreated()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java index 8a91c9b0d2456..91380980a58a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.license; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -48,14 +48,14 @@ public FeatureUsageInfo( } public FeatureUsageInfo(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { this.family = in.readOptionalString(); } else { this.family = null; } this.name = in.readString(); this.lastUsedTime = ZonedDateTime.ofInstant(Instant.ofEpochSecond(in.readLong()), ZoneOffset.UTC); - if (in.getVersion().onOrAfter(Version.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { this.context = in.readOptionalString(); } else { this.context = null; @@ -65,12 +65,12 @@ public FeatureUsageInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeOptionalString(this.family); } out.writeString(name); out.writeLong(lastUsedTime.toEpochSecond()); - if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { out.writeOptionalString(this.context); } out.writeString(licenseLevel); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java index 9889a88c91382..dca4352690bd6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,7 +57,7 @@ public XPackInfoRequest(StreamInput in) throws IOException { categories.add(Category.valueOf(in.readString())); } this.categories = categories; - if (hasLicenseVersionField(in.getVersion())) { + if (hasLicenseVersionField(in.getTransportVersion())) { int ignoredLicenseVersion = in.readVInt(); } } @@ -91,12 +91,12 @@ public void writeTo(StreamOutput out) throws IOException { for (Category category : categories) { out.writeString(category.name()); } - if (hasLicenseVersionField(out.getVersion())) { + if (hasLicenseVersionField(out.getTransportVersion())) { out.writeVInt(License.VERSION_CURRENT); } } - private static boolean hasLicenseVersionField(Version streamVersion) { - return streamVersion.onOrAfter(Version.V_7_8_1) && streamVersion.before(Version.V_8_0_0); + private static boolean hasLicenseVersionField(TransportVersion streamVersion) { + return streamVersion.onOrAfter(TransportVersion.V_7_8_1) && streamVersion.before(TransportVersion.V_8_0_0); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 05fffeca9cc2b..820181c5c3218 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -362,7 +362,7 @@ public FeatureSet(String name, boolean available, boolean enabled) { public FeatureSet(StreamInput in) throws IOException { 
this(in.readString(), readAvailable(in), in.readBoolean()); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readMap(); // backcompat reading native code info, but no longer used here } } @@ -370,7 +370,7 @@ public FeatureSet(StreamInput in) throws IOException { // this is separated out so that the removed description can be read from the stream on construction // TODO: remove this for 8.0 private static boolean readAvailable(StreamInput in) throws IOException { - if (in.getVersion().before(Version.V_7_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_7_3_0)) { in.readOptionalString(); } return in.readBoolean(); @@ -379,12 +379,12 @@ private static boolean readAvailable(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().before(Version.V_7_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_7_3_0)) { out.writeOptionalString(null); } out.writeBoolean(available); out.writeBoolean(enabled); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeGenericMap(Collections.emptyMap()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index a2f73fcab266a..94604dbd8227c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack.graph; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.IndicesRequest; @@ -111,7 +111,7 @@ public GraphExploreRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String[] types = in.readStringArray(); assert types.length == 0; } @@ -180,7 +180,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } out.writeOptionalString(routing); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index 7ff06b3fe9049..879db231a99e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -353,7 +352,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(repositoryName); out.writeOptionalString(snapshotName); out.writeOptionalString(shrinkIndexName); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeOptionalLong(indexCreationDate); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java index d160341b5f38d..59b614fc805be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -457,10 +456,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(setPriorityPriority); out.writeOptionalWriteable(shrinkMaxPrimaryShardSize); out.writeOptionalVInt(shrinkNumberOfShards); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeOptionalVLong(rolloverMaxPrimaryShardDocs); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeOptionalTimeValue(rolloverMinAge); out.writeOptionalVLong(rolloverMinDocs); out.writeOptionalWriteable(rolloverMinPrimaryShardSize); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java index 4d74afbcccd99..960f2ae58153a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -188,10 +187,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(maxPrimaryShardSize); out.writeOptionalTimeValue(maxAge); out.writeOptionalVLong(maxDocs); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeOptionalVLong(maxPrimaryShardDocs); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeOptionalWriteable(minSize); out.writeOptionalWriteable(minPrimaryShardSize); out.writeOptionalTimeValue(minAge); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java index 394f3cbe47d17..1bdc35962f7fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -206,7 +205,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(searchTotal); out.writeVLong(indexFailures); out.writeVLong(searchFailures); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { out.writeVLong(processingTime); out.writeVLong(processingTotal); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index aff6188a34c56..b36a6a447b6b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.transform.transforms.pivot; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -228,7 +228,7 @@ public DateHistogramGroupSource(StreamInput in) throws IOException { super(in); this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); - if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { this.offset = in.readLong(); } else { this.offset = 0; @@ -331,7 +331,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeInterval(interval, out); out.writeOptionalZoneId(timeZone); - if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { out.writeLong(offset); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index 67a25ecdb1c2c..37fe08b302e7d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -237,7 +237,7 @@ public void testSerialization() throws IOException { 
originalRequest.writeTo(out); final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(in); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java index 5109166a22c48..65acc3b7e2f14 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRequest; @@ -676,7 +676,7 @@ public static class Request extends ActionRequest implements TaskAwareRequest { earlyReadNodeCount = in.readVInt(); readEarly = in.readBoolean(); writeAndOverwrite = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { abortWrite = in.readBoolean(); } else { abortWrite = false; @@ -696,10 +696,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(earlyReadNodeCount); out.writeBoolean(readEarly); out.writeBoolean(writeAndOverwrite); - if (out.getVersion().onOrAfter(Version.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { out.writeBoolean(abortWrite); } else if (abortWrite) { - throw new IllegalStateException("cannot send abortWrite request to node of version [" + 
out.getVersion() + "]"); + throw new IllegalStateException("cannot send abortWrite request on transport version [" + out.getTransportVersion() + "]"); } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index 43030941e8028..ec79367f2b57c 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRequest; @@ -673,7 +673,7 @@ public Request(StreamInput in) throws IOException { maxTotalDataSize = ByteSizeValue.readFrom(in); detailed = in.readBoolean(); reroutedFrom = in.readOptionalWriteable(DiscoveryNode::new); - if (in.getVersion().onOrAfter(Version.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { abortWritePermitted = in.readBoolean(); } else { abortWritePermitted = false; @@ -700,10 +700,12 @@ public void writeTo(StreamOutput out) throws IOException { maxTotalDataSize.writeTo(out); out.writeBoolean(detailed); out.writeOptionalWriteable(reroutedFrom); - if (out.getVersion().onOrAfter(Version.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { out.writeBoolean(abortWritePermitted); } else if (abortWritePermitted) { - throw new IllegalStateException("cannot send abortWritePermitted request to node of 
version [" + out.getVersion() + "]"); + throw new IllegalStateException( + "cannot send abortWritePermitted request on transport version [" + out.getTransportVersion() + "]" + ); } } From 8aa40545bd75fd6cd3d357803c11b7cd6cbcd4e3 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 31 Jan 2023 13:29:03 +0100 Subject: [PATCH 32/38] Update rollup dependencies (#93369) Change ilm and data streams dependencies to be test dependencies. --- x-pack/plugin/rollup/build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index e2da0ba0f866e..0e97e4f555e79 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -10,8 +10,8 @@ archivesBaseName = 'x-pack-rollup' dependencies { compileOnly project(path: xpackModule('core')) - compileOnly project(':modules:data-streams') - compileOnly project(path: xpackModule('ilm')) + testImplementation project(':modules:data-streams') + testImplementation project(path: xpackModule('ilm')) compileOnly project(path: xpackModule('mapper-aggregate-metric')) testImplementation(testArtifact(project(xpackModule('core')))) } From 93544797f35ae9d7c8a34432ef0e21e69ae587d1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 31 Jan 2023 07:34:30 -0600 Subject: [PATCH 33/38] Avoiding race conditions in GeoIpDownloaderIT (#93363) --- .../org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index f3f37f50147fb..5d02fde827160 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ 
b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -254,6 +254,7 @@ public void testGeoIpDatabasesDownload() throws Exception { assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 }, 2, TimeUnit.MINUTES); for (String id : List.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb")) { @@ -309,7 +310,11 @@ public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { .setPersistentSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)) .get(); assertTrue(settingsResponse.isAcknowledged()); - assertBusy(() -> { assertNull(getTask().getState()); }); + assertBusy(() -> { + assertNotNull(getTask()); + assertNull(getTask().getState()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 + }); putNonGeoipPipeline(pipelineId); assertBusy(() -> { assertNull(getTask().getState()); }); putNonGeoipPipeline(pipelineId); From e68c2586b5cfe91e35555e4db277335e402b600d Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 31 Jan 2023 14:42:07 +0100 Subject: [PATCH 34/38] Set forced_refresh to true when using stateless refresh work-around (#93383) In #93160, we never set the forced_refresh flag in the response. With this change, the bulk response now correctly reflects what happened. It also unblocks a bunch of YAML tests for Stateless. 
Relates ES-5292 --- .../action/bulk/TransportBulkAction.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 66b365f6a092e..ba4bf8c343cb4 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -192,11 +192,20 @@ public static ActionListe @Override protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener outerListener) { - // As a work-around to support `?refresh`, explicitly replace the refresh policy with a call to the Refresh API. + // As a work-around to support `?refresh`, explicitly replace the refresh policy with a call to the Refresh API, + // and always set forced_refresh to true. // TODO: Replace with a less hacky approach. ActionListener listener = outerListener; if (DiscoveryNode.isStateless(clusterService.getSettings()) && bulkRequest.getRefreshPolicy() != WriteRequest.RefreshPolicy.NONE) { - listener = outerListener.delegateFailure((l, r) -> { client.admin().indices().prepareRefresh().execute(l.map(ignored -> r)); }); + listener = outerListener.delegateFailure((l, r) -> client.admin().indices().prepareRefresh().execute(l.map(ignored -> { + for (BulkItemResponse response : r.getItems()) { + DocWriteResponse docWriteResponse = response.getResponse(); + if (docWriteResponse != null) { + docWriteResponse.setForcedRefresh(true); + } + } + return r; + }))); bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); } /* From 845178b697e9147764b896324e6e32778ab39992 Mon Sep 17 00:00:00 2001 From: Pablo Alcantar Morales Date: Tue, 31 Jan 2023 14:52:29 +0100 Subject: [PATCH 35/38] Cache the creation of parsers within `DateProcessor` (#92880) cache potentially duped values in the `DateProcessor`, avoiding the creation of disposable 
objects during the different executions --- docs/changelog/92880.yaml | 5 ++ .../ingest/common/DateFormat.java | 11 +++- .../ingest/common/DateProcessor.java | 61 ++++++++++++++++++- .../ingest/common/DateProcessorTests.java | 33 ++++++++++ 4 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/92880.yaml diff --git a/docs/changelog/92880.yaml b/docs/changelog/92880.yaml new file mode 100644 index 0000000000000..5336987ee2cde --- /dev/null +++ b/docs/changelog/92880.yaml @@ -0,0 +1,5 @@ +pr: 92880 +summary: Cache the creation of parsers within DateProcessor +area: Ingest Node +type: enhancement +issues: [] diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 4a9fa93662bde..84c4315a69017 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -36,11 +36,10 @@ enum DateFormat { @Override Function getFunction(String format, ZoneId timezone, Locale locale) { return (date) -> { - TemporalAccessor accessor = DateFormatter.forPattern("iso8601").parse(date); + TemporalAccessor accessor = ISO_8601.parse(date); // even though locale could be set to en-us, Locale.ROOT (following iso8601 calendar data rules) should be used return DateFormatters.from(accessor, Locale.ROOT, timezone).withZoneSameInstant(timezone); }; - } }, Unix { @@ -115,6 +114,14 @@ Function getFunction(String format, ZoneId zoneId, Locale } }; + /** It's important to keep this variable as a constant because {@link DateFormatter#forPattern(String)} is an expensive method and, + * in this case, it's a never changing value. + *
+ * Also, we shouldn't inline it in the {@link DateFormat#Iso8601}'s enum because it'd make useless the cache used + * at {@link DateProcessor}). + */ + private static final DateFormatter ISO_8601 = DateFormatter.forPattern("iso8601"); + abstract Function getFunction(String format, ZoneId timezone, Locale locale); static DateFormat fromString(String format) { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index 71a73f605a4c2..8ef870f773779 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -9,8 +9,10 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.LocaleUtils; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -19,6 +21,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; +import java.lang.ref.SoftReference; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -26,7 +29,9 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.ConcurrentMap; import java.util.function.Function; +import java.util.function.Supplier; public final class DateProcessor extends AbstractProcessor { @@ -72,9 +77,17 @@ public final class DateProcessor extends AbstractProcessor { this.targetField = targetField; this.formats = formats; this.dateParsers = new ArrayList<>(this.formats.size()); + for (String format : formats) { DateFormat 
dateFormat = DateFormat.fromString(format); - dateParsers.add((params) -> dateFormat.getFunction(format, newDateTimeZone(params), newLocale(params))); + dateParsers.add((params) -> { + var documentZoneId = newDateTimeZone(params); + var documentLocale = newLocale(params); + return Cache.INSTANCE.getOrCompute( + new Cache.Key(format, documentZoneId, documentLocale), + () -> dateFormat.getFunction(format, documentZoneId, documentLocale) + ); + }); } this.outputFormat = outputFormat; formatter = DateFormatter.forPattern(this.outputFormat); @@ -198,4 +211,50 @@ public DateProcessor create( ); } } + + /** + * An ad-hoc cache class that just throws away the cached values once it's full because we don't want to affect the performance + * while applying eviction policies when adding new values or retrieving them. + */ + static final class Cache { + + private static final String CACHE_CAPACITY_SETTING = "es.ingest.date_processor.cache_capacity"; + static final Cache INSTANCE; + + static { + var cacheSizeStr = System.getProperty(CACHE_CAPACITY_SETTING, "256"); + try { + INSTANCE = new Cache(Integer.parseInt(cacheSizeStr)); + } catch (NumberFormatException e) { + throw new SettingsException("{} must be a valid number but was [{}]", CACHE_CAPACITY_SETTING, cacheSizeStr); + } + } + private final ConcurrentMap>> map; + private final int capacity; + + Cache(int capacity) { + if (capacity <= 0) { + throw new IllegalArgumentException("cache capacity must be a value greater than 0 but was " + capacity); + } + this.capacity = capacity; + this.map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(this.capacity); + } + + Function getOrCompute(Key key, Supplier> supplier) { + Function fn; + var element = map.get(key); + // element exist and wasn't GCed + if (element != null && (fn = element.get()) != null) { + return fn; + } + if (map.size() >= capacity) { + map.clear(); + } + fn = supplier.get(); + map.put(key, new SoftReference<>(fn)); + return fn; + } + + record 
Key(String format, ZoneId zoneId, Locale locale) {} + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index fcf1e9a301ebb..18454c866cb28 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -24,9 +24,15 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DateProcessorTests extends ESTestCase { @@ -335,4 +341,31 @@ public void testOutputFormat() { String expectedDate = "00:00:00." 
+ Strings.format("%09d", nanosAfterEpoch); assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo(expectedDate)); } + + @SuppressWarnings("unchecked") + public void testCacheIsEvictedAfterReachMaxCapacity() { + Supplier> supplier1 = mock(Supplier.class); + Supplier> supplier2 = mock(Supplier.class); + Function zonedDateTimeFunction1 = str -> ZonedDateTime.now(); + Function zonedDateTimeFunction2 = str -> ZonedDateTime.now(); + var cache = new DateProcessor.Cache(1); + var key1 = new DateProcessor.Cache.Key("format-1", ZoneId.systemDefault(), Locale.ROOT); + var key2 = new DateProcessor.Cache.Key("format-2", ZoneId.systemDefault(), Locale.ROOT); + + when(supplier1.get()).thenReturn(zonedDateTimeFunction1); + when(supplier2.get()).thenReturn(zonedDateTimeFunction2); + + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 call to supplier1 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 call to supplier2 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more call to supplier1 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 more call to supplier2 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more to call to supplier1 + + verify(supplier1, times(3)).get(); + verify(supplier2, times(2)).get(); + } } From 85a31872c36599b388586c17a4cb7d114eba6938 Mon Sep 17 00:00:00 2001 From: Nicolas Ruflin Date: Tue, 31 Jan 2023 16:40:29 +0100 Subject: [PATCH 36/38] Add 
`ignore_missing_component_templates` config option (#92436) This change introduces the configuration option `ignore_missing_component_templates` as discussed in https://github.com/elastic/elasticsearch/issues/92426 The implementation [option 6](https://github.com/elastic/elasticsearch/issues/92426#issuecomment-1372675683) was picked with a slight adjustment meaning no patterns are allowed. ## Implementation During the creation of an index template, the list of component templates is checked if all component templates exist. This check is extended to skip any component templates which are listed under `ignore_missing_component_templates`. An index template that skips the check for the component template `logs-foo@custom` looks as following: ``` PUT _index_template/logs-foo { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ``` The component template `logs-foo@package` has to exist before creation. It can be created with: ``` PUT _component_template/logs-foo@custom { "template": { "mappings": { "properties": { "host.ip": { "type": "ip" } } } } } ``` ## Testing For manual testing, different scenarios can be tested. To simplify testing, the commands from `.http` file are added. Before each test run, a clean cluster is expected. 
### New behaviour, missing component template With the new config option, it must be possible to create an index template with a missing component templates without getting an error: ``` ### Add logs-foo@package component template PUT http://localhost:9200/ _component_template/logs-foo@package Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.name": { "type": "keyword" } } } } } ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ### Create data stream PUT http://localhost:9200/ _data_stream/logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ### Check if mappings exist GET http://localhost:9200/ logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ``` It is checked if all templates could be created and data stream mappings are correct. ### Old behaviour, with all component templates In the following, a component template is made optional but it already exists. 
It is checked, that it will show up in the mappings: ``` ### Add logs-foo@package component template PUT http://localhost:9200/ _component_template/logs-foo@package Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.name": { "type": "keyword" } } } } } ### Add logs-foo@custom component template PUT http://localhost:9200/ _component_template/logs-foo@custom Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.ip": { "type": "ip" } } } } } ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ### Create data stream PUT http://localhost:9200/ _data_stream/logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ### Check if mappings exist GET http://localhost:9200/ logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ``` ### Check old behaviour Ensure, that the old behaviour still exists when a component template is used that is not part of `ignore_missing_component_templates`: ``` ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ``` Co-authored-by: Lee Hinman --- docs/changelog/92436.yaml | 6 + ...gnore-missing-component-templates.asciidoc | 95 +++++++++++ .../indices/index-templates.asciidoc | 2 + .../15_composition.yml | 63 ++++++++ .../metadata/ComposableIndexTemplate.java | 58 ++++++- 
.../MetadataIndexTemplateService.java | 25 ++- .../ComposableIndexTemplateTests.java | 40 ++++- .../MetadataIndexTemplateServiceTests.java | 150 +++++++++++++++++- ...adataMigrateToDataTiersRoutingService.java | 3 + 9 files changed, 423 insertions(+), 19 deletions(-) create mode 100644 docs/changelog/92436.yaml create mode 100644 docs/reference/indices/ignore-missing-component-templates.asciidoc diff --git a/docs/changelog/92436.yaml b/docs/changelog/92436.yaml new file mode 100644 index 0000000000000..1f8b4a9bf1877 --- /dev/null +++ b/docs/changelog/92436.yaml @@ -0,0 +1,6 @@ +pr: 92436 +summary: Add `ignore_missing_component_templates` config option +area: Indices APIs +type: enhancement +issues: + - 92426 diff --git a/docs/reference/indices/ignore-missing-component-templates.asciidoc b/docs/reference/indices/ignore-missing-component-templates.asciidoc new file mode 100644 index 0000000000000..8337be779c709 --- /dev/null +++ b/docs/reference/indices/ignore-missing-component-templates.asciidoc @@ -0,0 +1,95 @@ +[[ignore_missing_component_templates]] +== Config ignore_missing_component_templates + +The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist. Every time a data stream is created based on the index template, the existence of the component template will be checked. If it exists, it will used to form the index's composite settings. If it does not exist, it is ignored. + +=== Usage example + +In the following, one component template and an index template are created. The index template references two component templates, but only the `@package` one exists. + + +Create the component template `logs-foo_component1`. 
This has to be created before the index template as it is not optional: + +[source,console] +---- +PUT _component_template/logs-foo_component1 +{ + "template": { + "mappings": { + "properties": { + "host.name": { + "type": "keyword" + } + } + } + } +} +---- + +Next, the index template will be created and it references two component templates: + +[source,JSON] +---- + "composed_of": ["logs-foo_component1", "logs-foo_component2"] +---- + +Before, only the `logs-foo_component1` compontent template was created, meaning the `logs-foo_component2` is missing. Because of this the following entry was added to the config: + +[source,JSON] +---- + "ignore_missing_component_templates": ["logs-foo_component2"], +---- + +During creation of the template, it will not validate that `logs-foo_component2` exists: + + +[source,console] +---- +PUT _index_template/logs-foo +{ + "index_patterns": ["logs-foo-*"], + "data_stream": { }, + "composed_of": ["logs-foo_component1", "logs-foo_component2"], + "ignore_missing_component_templates": ["logs-foo_component2"], + "priority": 500 +} +---- +// TEST[continued] + +The index template `logs-foo` was successfully created. A data stream can be created based on this template: + +[source,console] +---- +PUT _data_stream/logs-foo-bar +---- +// TEST[continued] + +Looking at the mappings of the data stream, it will contain the `host.name` field. + +At a later stage, the missing component template might be added: + +[source,console] +---- +PUT _component_template/logs-foo_component2 +{ + "template": { + "mappings": { + "properties": { + "host.ip": { + "type": "ip" + } + } + } + } +} +---- +// TEST[continued] + +This will not have an immediate effect on the data stream. 
The mapping `host.ip` will only show up in the data stream mappings when the data stream is rolled over automatically next time or a manual rollover is triggered: + +[source,console] +---- +POST logs-foo-bar/_rollover +---- +// TEST[continued] +// TEST[teardown:data_stream_cleanup] diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 8a4c985970b26..6128ab48998f3 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -161,3 +161,5 @@ DELETE _component_template/component_template1 //// include::simulate-multi-component-templates.asciidoc[] + +include::ignore-missing-component-templates.asciidoc[] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 5eef78a8c63ba..2aaf492f0ff0d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -286,3 +286,66 @@ - is_false: purple-index.mappings.properties.nested.include_in_root - is_true: purple-index.mappings.properties.nested.include_in_parent + +--- +"Index template ignore_missing_component_template valid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + allowed_warnings: + - "index template [blue] has index patterns [purple-index] matching patterns from existing older templates [global] with patterns (global => [*]); this template [blue] will take precedence during new index creation" + 
indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["blue"] + + - do: + indices.create: + index: purple-index + + - do: + indices.get: + index: purple-index + + - match: {purple-index.mappings.properties.foo: {type: keyword}} + +--- +"Index template ignore_missing_component_template invalid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + catch: /index_template \[blue\] invalid, cause \[index template \[blue\] specifies a missing component templates \[blue\] that does not exist/ + indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["foo"] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 367b0f9f6f00d..377d91d60a99e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -46,6 +47,7 @@ public class ComposableIndexTemplate implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -59,7 +61,8 @@ public class ComposableIndexTemplate implements SimpleDiffable) a[5], (DataStreamTemplate) a[6], - (Boolean) a[7] + (Boolean) a[7], + (List) a[8] ) ); @@ -72,6 +75,7 @@ public class 
ComposableIndexTemplate implements SimpleDiffable p.map(), METADATA); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), DataStreamTemplate.PARSER, DATA_STREAM); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_AUTO_CREATE); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), IGNORE_MISSING_COMPONENT_TEMPLATES); } private final List indexPatterns; @@ -89,6 +93,8 @@ public class ComposableIndexTemplate implements SimpleDiffable ignoreMissingComponentTemplates; static Diff readITV2DiffFrom(StreamInput in) throws IOException { return SimpleDiffable.readDiffFrom(ComposableIndexTemplate::new, in); @@ -106,7 +112,7 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null); } public ComposableIndexTemplate( @@ -118,7 +124,7 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); } public ComposableIndexTemplate( @@ -130,6 +136,20 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate, @Nullable Boolean allowAutoCreate + ) { + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); + } + + public ComposableIndexTemplate( + List indexPatterns, + @Nullable Template template, + @Nullable List componentTemplates, + @Nullable Long priority, + @Nullable Long version, + @Nullable Map metadata, + @Nullable DataStreamTemplate dataStreamTemplate, + @Nullable Boolean allowAutoCreate, + @Nullable List 
ignoreMissingComponentTemplates ) { this.indexPatterns = indexPatterns; this.template = template; @@ -139,6 +159,7 @@ public ComposableIndexTemplate( this.metadata = metadata; this.dataStreamTemplate = dataStreamTemplate; this.allowAutoCreate = allowAutoCreate; + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; } public ComposableIndexTemplate(StreamInput in) throws IOException { @@ -154,6 +175,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); this.allowAutoCreate = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + this.ignoreMissingComponentTemplates = in.readOptionalStringList(); + } else { + this.ignoreMissingComponentTemplates = null; + } } public List indexPatterns() { @@ -204,6 +230,11 @@ public Boolean getAllowAutoCreate() { return this.allowAutoCreate; } + @Nullable + public List getIgnoreMissingComponentTemplates() { + return ignoreMissingComponentTemplates; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.indexPatterns); @@ -219,6 +250,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); out.writeOptionalBoolean(allowAutoCreate); + if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + out.writeOptionalStringCollection(ignoreMissingComponentTemplates); + } } @Override @@ -246,6 +280,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (this.allowAutoCreate != null) { builder.field(ALLOW_AUTO_CREATE.getPreferredName(), allowAutoCreate); } + if (this.ignoreMissingComponentTemplates != null) { + builder.stringListField(IGNORE_MISSING_COMPONENT_TEMPLATES.getPreferredName(), ignoreMissingComponentTemplates); + } builder.endObject(); return builder; } @@ -260,7 +297,8 @@ public int hashCode() { 
this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } @@ -280,7 +318,8 @@ && componentTemplatesEquals(this.componentTemplates, other.componentTemplates) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate) - && Objects.equals(this.allowAutoCreate, other.allowAutoCreate); + && Objects.equals(this.allowAutoCreate, other.allowAutoCreate) + && Objects.equals(this.ignoreMissingComponentTemplates, other.ignoreMissingComponentTemplates); } static boolean componentTemplatesEquals(List c1, List c2) { @@ -421,6 +460,7 @@ public static class Builder { private Map metadata; private DataStreamTemplate dataStreamTemplate; private Boolean allowAutoCreate; + private List ignoreMissingComponentTemplates; public Builder() {} @@ -464,6 +504,11 @@ public Builder allowAutoCreate(Boolean allowAutoCreate) { return this; } + public Builder ignoreMissingComponentTemplates(List ignoreMissingComponentTemplates) { + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; + return this; + } + public ComposableIndexTemplate build() { return new ComposableIndexTemplate( this.indexPatterns, @@ -473,7 +518,8 @@ public ComposableIndexTemplate build() { this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 36a432b7625e9..3dca87dcbde41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -505,17 +505,34 @@ public static void 
validateV2TemplateRequest(Metadata metadata, String name, Com } final Map componentTemplates = metadata.componentTemplates(); + final List ignoreMissingComponentTemplates = (template.getIgnoreMissingComponentTemplates() == null + ? List.of() + : template.getIgnoreMissingComponentTemplates()); final List missingComponentTemplates = template.composedOf() .stream() .filter(componentTemplate -> componentTemplates.containsKey(componentTemplate) == false) + .filter(componentTemplate -> ignoreMissingComponentTemplates.contains(componentTemplate) == false) .toList(); - if (missingComponentTemplates.size() > 0) { + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() == 0) { throw new InvalidIndexTemplateException( name, "index template [" + name + "] specifies component templates " + missingComponentTemplates + " that do not exist" ); } + + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() > 0) { + + throw new InvalidIndexTemplateException( + name, + "index template [" + + name + + "] specifies a missing component templates " + + missingComponentTemplates + + " " + + "that does not exist and is not part of 'ignore_missing_component_templates'" + ); + } } public ClusterState addIndexTemplateV2( @@ -579,7 +596,8 @@ public ClusterState addIndexTemplateV2( template.version(), template.metadata(), template.getDataStreamTemplate(), - template.getAllowAutoCreate() + template.getAllowAutoCreate(), + template.getIgnoreMissingComponentTemplates() ); } @@ -679,7 +697,8 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT indexTemplate.version(), indexTemplate.metadata(), indexTemplate.getDataStreamTemplate(), - indexTemplate.getAllowAutoCreate() + indexTemplate.getAllowAutoCreate(), + indexTemplate.getIgnoreMissingComponentTemplates() ); validate(name, templateToValidate); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index d4e7993ac0529..649355fb8b7f4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -75,6 +75,7 @@ public static ComposableIndexTemplate randomInstance() { } List indexPatterns = randomList(1, 4, () -> randomAlphaOfLength(4)); + List ignoreMissingComponentTemplates = randomList(0, 4, () -> randomAlphaOfLength(4)); return new ComposableIndexTemplate( indexPatterns, template, @@ -83,7 +84,8 @@ public static ComposableIndexTemplate randomInstance() { randomBoolean() ? null : randomNonNegativeLong(), meta, dataStreamTemplate, - randomBoolean() ? null : randomBoolean() + randomBoolean() ? null : randomBoolean(), + ignoreMissingComponentTemplates ); } @@ -149,7 +151,7 @@ protected ComposableIndexTemplate mutateInstance(ComposableIndexTemplate orig) { } public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate orig) { - switch (randomIntBetween(0, 6)) { + switch (randomIntBetween(0, 7)) { case 0: List newIndexPatterns = randomValueOtherThan( orig.indexPatterns(), @@ -177,7 +179,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 2: List newComposedOf = randomValueOtherThan(orig.composedOf(), () -> randomList(0, 10, () -> randomAlphaOfLength(5))); @@ -189,7 +192,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 3: return new ComposableIndexTemplate( @@ -200,7 +204,8 @@ public static ComposableIndexTemplate 
mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 4: return new ComposableIndexTemplate( @@ -211,7 +216,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori randomValueOtherThan(orig.version(), ESTestCase::randomNonNegativeLong), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 5: return new ComposableIndexTemplate( @@ -222,7 +228,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), randomValueOtherThan(orig.metadata(), ComposableIndexTemplateTests::randomMeta), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 6: return new ComposableIndexTemplate( @@ -233,7 +240,24 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), randomValueOtherThan(orig.getDataStreamTemplate(), ComposableIndexTemplateTests::randomDataStreamTemplate), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() + ); + case 7: + List ignoreMissingComponentTemplates = randomValueOtherThan( + orig.getIgnoreMissingComponentTemplates(), + () -> randomList(1, 4, () -> randomAlphaOfLength(4)) + ); + return new ComposableIndexTemplate( + orig.indexPatterns(), + orig.template(), + orig.composedOf(), + orig.priority(), + orig.version(), + orig.metadata(), + orig.getDataStreamTemplate(), + orig.getAllowAutoCreate(), + ignoreMissingComponentTemplates ); default: throw new IllegalStateException("illegal randomization branch"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 9702810e3afc6..e8ad6d75736b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -597,6 +597,7 @@ public void testRemoveIndexTemplateV2Wildcards() throws Exception { ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); assertThat(state.metadata().templatesV2().get("foo"), notNullValue()); + assertTemplatesEqual(state.metadata().templatesV2().get("foo"), template); Exception e = expectThrows( @@ -1529,7 +1530,12 @@ public void testAddInvalidTemplate() throws Exception { }); assertThat(e.name(), equalTo("template")); - assertThat(e.getMessage(), containsString("index template [template] specifies " + "component templates [bad] that do not exist")); + assertThat( + e.getMessage(), + containsString( + "index_template [template] invalid, cause [index template [template] specifies component templates [bad] that do not exist]" + ) + ); } public void testRemoveComponentTemplate() throws Exception { @@ -2108,6 +2114,146 @@ public void testV2TemplateOverlaps() throws Exception { } } + /** + * Tests to add two component templates but ignores both with is valid + * + * @throws Exception + */ + public void testIgnoreMissingComponentTemplateValid() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("bar"); + + // Order of params is mixed up on purpose + List ignoreMissingComponentTemplates = new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-test-*"), + null, + componentTemplates, + 1L, 
+ null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + + ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); + MetadataIndexTemplateService.validateV2TemplateRequest(state.metadata(), indexTemplateName, template); + } + + public void testIgnoreMissingComponentTemplateInvalid() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("fail"); + + List ignoreMissingComponentTemplates = new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-foo-*"), + null, + componentTemplates, + 1L, + null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); + + // try now the same thing with validation on + InvalidIndexTemplateException e = expectThrows( + InvalidIndexTemplateException.class, + () -> MetadataIndexTemplateService.validateV2TemplateRequest(state.metadata(), indexTemplateName, template) + + ); + assertThat(e.getMessage(), containsString("specifies a missing component templates [fail] that does not exist")); + } + + /** + * This is a similar test as above but with running the service + * @throws Exception + */ + public void testAddInvalidTemplateIgnoreService() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("fail"); + + List ignoreMissingComponentTemplates 
= new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-foo-*"), + null, + componentTemplates, + 1L, + null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + + ComponentTemplate ct = new ComponentTemplate(new Template(Settings.EMPTY, null, null), null, null); + + final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); + CountDownLatch ctLatch = new CountDownLatch(1); + // Makes ure the foo template exists + service.putComponentTemplate( + "api", + randomBoolean(), + "foo", + TimeValue.timeValueSeconds(5), + ct, + ActionListener.wrap(r -> ctLatch.countDown(), e -> { + logger.error("unexpected error", e); + fail("unexpected error"); + }) + ); + ctLatch.await(5, TimeUnit.SECONDS); + InvalidIndexTemplateException e = expectThrows(InvalidIndexTemplateException.class, () -> { + CountDownLatch latch = new CountDownLatch(1); + AtomicReference err = new AtomicReference<>(); + service.putIndexTemplateV2( + "api", + randomBoolean(), + "template", + TimeValue.timeValueSeconds(30), + template, + ActionListener.wrap(r -> fail("should have failed!"), exception -> { + err.set(exception); + latch.countDown(); + }) + ); + latch.await(5, TimeUnit.SECONDS); + if (err.get() != null) { + throw err.get(); + } + }); + + assertThat(e.name(), equalTo("template")); + assertThat(e.getMessage(), containsString("missing component templates [fail] that does not exist")); + } + private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { ThreadPool testThreadPool = mock(ThreadPool.class); ClusterService clusterService = ClusterServiceUtils.createClusterService(testThreadPool); @@ -2200,6 +2346,6 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { } public static void assertTemplatesEqual(ComposableIndexTemplate actual, ComposableIndexTemplate expected) { 
- assertTrue(Objects.equals(actual, expected)); + assertEquals(actual, expected); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java index a0d64321041f7..ff9659235f2d3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java @@ -693,6 +693,9 @@ static List migrateComposableTemplates(Metadata.Builder mb, ClusterState migratedComposableTemplateBuilder.metadata(composableTemplate.metadata()); migratedComposableTemplateBuilder.dataStreamTemplate(composableTemplate.getDataStreamTemplate()); migratedComposableTemplateBuilder.allowAutoCreate(composableTemplate.getAllowAutoCreate()); + migratedComposableTemplateBuilder.ignoreMissingComponentTemplates( + composableTemplate.getIgnoreMissingComponentTemplates() + ); mb.put(templateEntry.getKey(), migratedComposableTemplateBuilder.build()); migratedComposableTemplates.add(templateEntry.getKey()); From 610d507a58b7b9149693059c81ea9998b6ed610a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 1 Feb 2023 08:15:43 -0800 Subject: [PATCH 37/38] Fix BWC tests when FIPS is enabled --- .../gradle/internal/ElasticsearchTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 854dc6d204382..c6758092b17ec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -89,7 +89,7 @@ public void execute(Task t) { test.getJvmArgumentProviders().add(nonInputProperties); test.getExtensions().add("nonInputProperties", nonInputProperties); - test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName())); + test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); test.exclude("**/*$*.class"); From 0857e41cf1037f20a5773d3e53337b00653dc0e4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 1 Feb 2023 08:57:26 -0800 Subject: [PATCH 38/38] Attempt to fix windows failures --- .../test/cluster/local/LocalClusterFactory.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 963566f52e8a9..5f43bb8aa71b6 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -79,13 +79,13 @@ public LocalClusterHandle create(LocalClusterSpec spec) { public class Node { private final LocalNodeSpec spec; private final Path workingDir; - private final Path distributionDir; private final Path repoDir; private final Path dataDir; private final Path logsDir; private final Path configDir; private final Path tempDir; + private Path distributionDir; private Version currentVersion; private Process process = null; private DistributionDescriptor distributionDescriptor; @@ -93,7 +93,6 @@ public class Node { public Node(LocalNodeSpec spec) { this.spec = spec; this.workingDir = 
baseWorkingDir.resolve(spec.getCluster().getName()).resolve(spec.getName()); - this.distributionDir = workingDir.resolve("distro"); // location of es distribution files, typically hard-linked this.repoDir = baseWorkingDir.resolve("repo"); this.dataDir = workingDir.resolve("data"); this.logsDir = workingDir.resolve("logs"); @@ -111,6 +110,11 @@ public synchronized void start(Version version) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); + distributionDir = OS.conditional( + // Use per-version distribution directories on Windows to avoid cleanup failures + c -> c.onWindows(() -> workingDir.resolve("distro").resolve(distributionDescriptor.getVersion().toString())) + .onUnix(() -> workingDir.resolve("distro")) + ); initializeWorkingDirectory(currentVersion != null); createConfigDirectory(); copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install