From e0340d3b8b758752bd7e109ba697229e841f5e2f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 12:01:14 -0800 Subject: [PATCH 01/63] Convert full cluster restart tests to new rest testing framework --- .../main/groovy/elasticsearch.bwc-test.gradle | 14 +++ .../InternalTestArtifactExtension.java | 2 +- .../test/rest/RestTestBasePlugin.java | 74 ++++++++++-- qa/full-cluster-restart/build.gradle | 62 ++--------- .../FullClustRestartUpgradeStatus.java | 14 +++ .../upgrades/FullClusterRestartIT.java | 49 +++++++- .../FullClusterRestartTestOrdering.java | 24 ++++ ...rameterizedFullClusterRestartTestCase.java | 90 +++++++++++++++ .../upgrades/QueryBuilderBWCIT.java | 29 ++++- .../test/cluster/ClusterHandle.java | 17 +++ .../test/cluster/ElasticsearchCluster.java | 3 + .../local/AbstractLocalSpecBuilder.java | 23 ++++ .../local/DefaultLocalClusterSpecBuilder.java | 34 +++++- .../cluster/local/LocalClusterFactory.java | 62 +++++++++-- .../cluster/local/LocalClusterHandle.java | 49 ++++---- .../test/cluster/local/LocalClusterSpec.java | 13 ++- .../local/LocalClusterSpecBuilder.java | 11 ++ .../local/LocalElasticsearchCluster.java | 30 ++++- .../test/cluster/local/LocalSpecBuilder.java | 11 ++ .../LocalDistributionResolver.java | 3 + .../ReleasedDistributionResolver.java | 54 +++++++++ .../SnapshotDistributionResolver.java | 30 ++++- .../qa/full-cluster-restart/build.gradle | 99 ++--------------- .../xpack/restart/FullClusterRestartIT.java | 46 +++++++- .../resources/system_key | 0 x-pack/qa/full-cluster-restart/build.gradle | 105 ++---------------- ...stractXpackFullClusterRestartTestCase.java | 49 ++++++++ .../restart/CoreFullClusterRestartIT.java | 46 ++++++++ .../xpack/restart/FullClusterRestartIT.java | 10 +- ...MLModelDeploymentFullClusterRestartIT.java | 10 +- ...nfigIndexMappingsFullClusterRestartIT.java | 10 +- .../MlHiddenIndicesFullClusterRestartIT.java | 10 +- .../MlMigrationFullClusterRestartIT.java | 10 +- 
.../xpack/restart/QueryBuilderBWCIT.java | 42 +++++++ .../xpack/restart/WatcherMappingUpdateIT.java | 10 +- .../xpack/restart/funny-timeout-watch.json | 0 .../xpack/restart/logging-watch.json | 0 .../xpack/restart/simple-watch.json | 0 .../xpack/restart/throttle-period-watch.json | 0 .../resources/system_key | 0 .../restart/CoreFullClusterRestartIT.java | 24 ---- .../xpack/restart/QueryBuilderBWCIT.java | 22 ---- 42 files changed, 830 insertions(+), 361 deletions(-) create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/FullClusterRestartIT.java (97%) create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java (92%) create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (66%) rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (99%) rename x-pack/qa/full-cluster-restart/src/{test => 
javaRestTest}/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java (97%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java (94%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java (96%) create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java (92%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/logging-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/simple-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index b80c450c5914e..a5e74c3721297 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ 
b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,6 +9,8 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask ext.bwcTaskName = { Version version -> return "v${version}#bwcTest" @@ -36,5 +38,17 @@ plugins.withType(ElasticsearchTestBasePlugin) { } } +plugins.withType(InternalJavaRestTestPlugin) { + tasks.named("javaRestTest") { + enabled = false + } + + tasks.withType(StandaloneRestIntegTestTask).configureEach { + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + usesDefaultDistribution() + } +} + tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java index fae845b229651..4952085f466be 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java @@ -32,7 +32,7 @@ public void registerTestArtifactFromSourceSet(SourceSet sourceSet) { JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class); javaPluginExtension.registerFeature(name + "Artifacts", featureSpec -> { featureSpec.usingSourceSet(sourceSet); - featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-" + name + "-artifacts", "1.0"); + featureSpec.capability("org.elasticsearch.gradle", project.getName() + 
"-test-artifacts", "1.0"); // This feature is only used internally in the // elasticsearch build so we do not need any publication. featureSpec.disablePublication(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 9baa17bc00d7c..1a7b5bc3ee2a1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -13,6 +13,8 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; +import org.elasticsearch.gradle.ElasticsearchDistributionType; +import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; @@ -58,6 +60,8 @@ public class RestTestBasePlugin implements Plugin { private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java"; private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution"; private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution"; + private static final String BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private static final String BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; private static final String TESTS_CLUSTER_MODULES_PATH_SYSPROP = "tests.cluster.modules.path"; private static final String TESTS_CLUSTER_PLUGINS_PATH_SYSPROP = "tests.cluster.plugins.path"; private static final String DEFAULT_REST_INTEG_TEST_DISTRO = "default_distro"; @@ -79,16 +83,17 @@ public void apply(Project project) { 
project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); // Register integ-test and default distributions - NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); - ElasticsearchDistribution defaultDistro = distributions.create(DEFAULT_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - }); - ElasticsearchDistribution integTestDistro = distributions.create(INTEG_TEST_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - distro.setType(ElasticsearchDistributionTypes.INTEG_TEST_ZIP); - }); + ElasticsearchDistribution defaultDistro = createDistribution( + project, + DEFAULT_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch() + ); + ElasticsearchDistribution integTestDistro = createDistribution( + project, + INTEG_TEST_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch(), + ElasticsearchDistributionTypes.INTEG_TEST_ZIP + ); // Create configures for module and plugin dependencies Configuration modulesConfiguration = createPluginConfiguration(project, MODULES_CONFIGURATION, true, false); @@ -151,6 +156,35 @@ public Void call(Object... args) { return null; } }); + + // Add `usesBwcDistribution(version)` extension method to test tasks to indicate they require a BWC distribution + task.getExtensions().getExtraProperties().set("usesBwcDistribution", new Closure(task) { + @Override + public Void call(Object... 
args) { + if (args.length != 1 && args[0] instanceof Version == false) { + throw new IllegalArgumentException("Expected exactly one argument of type org.elasticsearch.gradle.Version"); + } + + Version version = (Version) args[0]; + boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + String versionString = version.toString(); + ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); + + task.dependsOn(bwcDistro); + registerDistributionInputs(task, bwcDistro); + + nonInputSystemProperties.systemProperty( + (isReleased ? BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX : BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX) + versionString, + providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) + ); + + if (version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + // If we are upgrade testing older versions we also need to upgrade to 7.last + this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + } + return null; + } + }); }); project.getTasks() @@ -158,6 +192,26 @@ public Void call(Object... 
args) { .configure(check -> check.dependsOn(project.getTasks().withType(StandaloneRestIntegTestTask.class))); } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { + return createDistribution(project, name, version, null); + } + + private ElasticsearchDistribution createDistribution(Project project, String name, String version, ElasticsearchDistributionType type) { + NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); + ElasticsearchDistribution maybeDistro = distributions.findByName(name); + if (maybeDistro == null) { + return distributions.create(name, distro -> { + distro.setVersion(version); + distro.setArchitecture(Architecture.current()); + if (type != null) { + distro.setType(type); + } + }); + } else { + return maybeDistro; + } + } + private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Action patternFilter) { return distribution.getExtracted().getAsFileTree().matching(patternFilter); } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index a3af45b43363e..b6f181809e0e4 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -6,64 +6,20 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.internal-test-artifact' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } +testArtifacts { + registerTestArtifactFromSourceSet(sourceSets.javaRestTest) +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#upgradedClusterTest") +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java new file mode 100644 index 0000000000000..dda196ddafc20 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +public enum FullClustRestartUpgradeStatus { + OLD, + UPGRADED +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java similarity index 97% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index af66fbc61562b..e250a945aa903 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; @@ -28,6 +30,10 @@ import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.transport.Compression; @@ -35,6 +41,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; import java.io.IOException; 
import java.util.ArrayList; @@ -44,7 +54,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -80,13 +89,41 @@ * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. */ -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + private static TemporaryFolder repoDirectory = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .apply(() -> clusterConfig) + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); private String index; + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + @Before public void setIndex() { - index = getTestName().toLowerCase(Locale.ROOT); + index = getRootTestName(); } public void testSearch() throws Exception { @@ -1051,7 +1088,7 @@ public void testSnapshotRestore() throws IOException { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); 
} @@ -1725,7 +1762,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } @@ -1785,7 +1822,7 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java new file mode 100644 index 0000000000000..9f5c57346b945 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; + +import java.util.Comparator; + +public class FullClusterRestartTestOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + return Integer.compare(getOrdinal(o1), getOrdinal(o2)); + } + + private int getOrdinal(TestMethodAndParams t) { + return ((FullClustRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..29e1ae3e92255 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.UPGRADED; + +@TestCaseOrdering(FullClusterRestartTestOrdering.class) +public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static boolean upgradeFailed = false; + private final FullClustRestartUpgradeStatus requestedUpgradeStatus; + + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + this.requestedUpgradeStatus = upgradeStatus; + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return Arrays.stream(FullClustRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + } + + @Before + public void maybeUpgrade() throws Exception { + if (getUpgradeCluster().getVersion().equals(OLD_CLUSTER_VERSION) && requestedUpgradeStatus == UPGRADED) { + try { + if (OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { + // First upgrade to latest wire compatible version + getUpgradeCluster().upgradeToVersion(MINIMUM_WIRE_COMPATIBLE_VERSION); + } + getUpgradeCluster().upgradeToVersion(Version.CURRENT); + closeClients(); + initClient(); + } catch (Exception e) { + 
upgradeFailed = true; + throw e; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + public boolean isRunningAgainstOldCluster() { + return requestedUpgradeStatus == OLD; + } + + public static org.elasticsearch.Version getOldClusterVersion() { + return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + public static Version getOldClusterTestVersion() { + return Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + protected abstract ElasticsearchCluster getUpgradeCluster(); + + @Override + protected String getTestRestCluster() { + return getUpgradeCluster().getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected String getRootTestName() { + return getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java similarity index 92% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index d69f0b05958f9..91607dec6f721 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -32,7 +34,11 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import 
org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.ClassRule; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -54,10 +60,29 @@ * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ -public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { - +public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("xpack.security.enabled", "false") + .apply(() -> clusterConfig) + .build(); + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public QueryBuilderBWCIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + static { addCandidate(""" "match": { "text_field": "value"} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java index 658925744860d..2a4e3e3958c57 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java @@ -8,6 +8,8 @@ package org.elasticsearch.test.cluster; +import org.elasticsearch.test.cluster.util.Version; + import java.io.Closeable; /** @@ -73,4 
+75,19 @@ public interface ClusterHandle extends Closeable { * @return cluster node TCP transport endpoints */ String getTransportEndpoint(int index); + + /** + * Upgrades a single node to the given version. Method blocks until the node is back up and ready to respond to requests. + * + * @param index index of node ot upgrade + * @param version version to upgrade to + */ + void upgradeNodeToVersion(int index, Version version); + + /** + * Performs a "full cluster restart" upgrade to the given version. Method blocks until the cluster is restarted and available. + * + * @param version version to upgrade to + */ + void upgradeToVersion(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java index 02eb3fb73df63..3bc4efaeb032f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java @@ -10,6 +10,7 @@ import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; import org.elasticsearch.test.cluster.local.LocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.util.Version; import org.junit.rules.TestRule; /** @@ -32,4 +33,6 @@ static LocalClusterSpecBuilder local() { return new DefaultLocalClusterSpecBuilder(); } + Version getVersion(); + } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 7e6fede6b84aa..2ca00e27435c1 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.ArrayList; @@ -32,8 +33,10 @@ public abstract class AbstractLocalSpecBuilder> im private final Set plugins = new HashSet<>(); private final Set features = new HashSet<>(); private final Map keystoreSettings = new HashMap<>(); + private final Map keystoreFiles = new HashMap<>(); private final Map extraConfigFiles = new HashMap<>(); private DistributionType distributionType; + private Version version; protected AbstractLocalSpecBuilder(AbstractLocalSpecBuilder parent) { this.parent = parent; @@ -136,6 +139,16 @@ public Map getKeystoreSettings() { return inherit(() -> parent.getKeystoreSettings(), keystoreSettings); } + @Override + public T keystore(String key, Resource file) { + this.keystoreFiles.put(key, file); + return cast(this); + } + + public Map getKeystoreFiles() { + return inherit(() -> parent.getKeystoreFiles(), keystoreFiles); + } + @Override public T configFile(String fileName, Resource configFile) { this.extraConfigFiles.put(fileName, configFile); @@ -146,6 +159,16 @@ public Map getExtraConfigFiles() { return inherit(() -> parent.getExtraConfigFiles(), extraConfigFiles); } + @Override + public T version(Version version) { + this.version = version; + return cast(this); + } + + public Version getVersion() { + return inherit(() -> parent.getVersion(), version); + } + private List inherit(Supplier> parent, List child) { List combinedList = new ArrayList<>(); if (this.parent != null) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 7e4011ca9481c..8d8ae010c552f 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -19,12 +19,14 @@ import java.util.List; import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Supplier; public class DefaultLocalClusterSpecBuilder extends AbstractLocalSpecBuilder implements LocalClusterSpecBuilder { private String name = "test-cluster"; private final List nodeBuilders = new ArrayList<>(); private final List users = new ArrayList<>(); private final List roleFiles = new ArrayList<>(); + private final List> lazyConfigProviders = new ArrayList<>(); public DefaultLocalClusterSpecBuilder() { super(null); @@ -45,6 +47,12 @@ public DefaultLocalClusterSpecBuilder apply(LocalClusterConfigProvider configPro return this; } + @Override + public LocalClusterSpecBuilder apply(Supplier configProvider) { + lazyConfigProviders.add(configProvider); + return this; + } + @Override public DefaultLocalClusterSpecBuilder nodes(int nodes) { if (nodes < nodeBuilders.size()) { @@ -116,7 +124,28 @@ public ElasticsearchCluster build() { clusterSpec.setNodes(nodeSpecs); clusterSpec.validate(); - return new LocalElasticsearchCluster(clusterSpec); + return new LocalElasticsearchCluster(this); + } + + LocalClusterSpec buildClusterSpec() { + // Apply lazily provided configuration + lazyConfigProviders.forEach(s -> s.get().apply(this)); + + List clusterUsers = users.isEmpty() ? 
List.of(User.DEFAULT_USER) : users; + LocalClusterSpec clusterSpec = new LocalClusterSpec(name, clusterUsers, roleFiles); + List nodeSpecs; + + if (nodeBuilders.isEmpty()) { + // No node-specific configuration so assume a single-node cluster + nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec)); + } else { + nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec)).toList(); + } + + clusterSpec.setNodes(nodeSpecs); + clusterSpec.validate(); + + return clusterSpec; } public static class DefaultLocalNodeSpecBuilder extends AbstractLocalSpecBuilder implements LocalNodeSpecBuilder { @@ -137,7 +166,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Version.CURRENT, + Optional.of(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), getEnvironmentProviders(), @@ -147,6 +176,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { Optional.ofNullable(getDistributionType()).orElse(DistributionType.INTEG_TEST), getFeatures(), getKeystoreSettings(), + getKeystoreFiles(), getExtraConfigFiles() ); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index d85f008727176..92112ca9559d9 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -81,7 +81,7 @@ public class Node { private final Path configDir; private final Path tempDir; - private boolean initialized = false; + private Version currentVersion; private Process process = null; private DistributionDescriptor distributionDescriptor; @@ -96,19 +96,22 @@ public Node(LocalNodeSpec spec) { this.tempDir = workingDir.resolve("tmp"); // elasticsearch temporary directory } - public synchronized void start() { 
+ public synchronized void start(Version version) { LOGGER.info("Starting Elasticsearch node '{}'", spec.getName()); + if (version != null) { + spec.setVersion(version); + } - if (initialized == false) { + if (currentVersion == null || currentVersion.equals(spec.getVersion()) == false) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); - initializeWorkingDirectory(); + initializeWorkingDirectory(currentVersion != null); installPlugins(); - if (spec.getDistributionType() == DistributionType.INTEG_TEST) { + if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { installModules(); } - initialized = true; + currentVersion = spec.getVersion(); } try { @@ -120,6 +123,7 @@ public synchronized void start() { writeConfiguration(); createKeystore(); addKeystoreSettings(); + addKeystoreFiles(); configureSecurity(); copyExtraConfigFiles(); @@ -155,6 +159,20 @@ public String getTransportEndpoint() { return readPortsFile(portsFile).get(0); } + public void deletePortsFiles() { + try { + Path hostsFile = workingDir.resolve("config").resolve("unicast_hosts.txt"); + Path httpPortsFile = workingDir.resolve("logs").resolve("http.ports"); + Path transportPortsFile = workingDir.resolve("logs").resolve("transport.ports"); + + Files.deleteIfExists(hostsFile); + Files.deleteIfExists(httpPortsFile); + Files.deleteIfExists(transportPortsFile); + } catch (IOException e) { + throw new UncheckedIOException("Failed to write unicast_hosts for: " + this, e); + } + } + public LocalNodeSpec getSpec() { return spec; } @@ -192,9 +210,13 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory() { + private void initializeWorkingDirectory(boolean preserverWorkingDirectory) { try { - IOUtils.deleteWithRetry(workingDir); + if (preserverWorkingDirectory == false) { + 
IOUtils.deleteWithRetry(workingDir); + } else { + IOUtils.deleteWithRetry(distributionDir); + } try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); } catch (IOUtils.LinkCreationException e) { @@ -310,6 +332,30 @@ private void addKeystoreSettings() { }); } + private void addKeystoreFiles() { + spec.getKeystoreFiles().forEach((key, file) -> { + try { + Path path = Files.createTempFile(tempDir, key, null); + file.writeTo(path); + + ProcessUtils.exec( + workingDir, + OS.conditional( + c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) + .onUnix(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore")) + ), + getEnvironmentVariables(), + false, + "add-file", + key, + path.toString() + ).waitFor(); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } + }); + } + private void configureSecurity() { if (spec.isSecurityEnabled()) { if (spec.getUsers().isEmpty() == false) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 878b017e3cd62..62ba9113d47c1 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -15,6 +15,7 @@ import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; import org.elasticsearch.test.cluster.util.Retry; +import org.elasticsearch.test.cluster.util.Version; import java.io.IOException; import java.io.UncheckedIOException; @@ -66,7 +67,7 @@ public LocalClusterHandle(String name, List nodes) { public void start() { if (started.getAndSet(true) == false) { LOGGER.info("Starting Elasticsearch test cluster '{}'", name); - execute(() -> 
nodes.parallelStream().forEach(Node::start)); + execute(() -> nodes.parallelStream().forEach(n -> n.start(null))); } waitUntilReady(); } @@ -75,11 +76,11 @@ public void start() { public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); - execute(() -> nodes.forEach(n -> n.stop(forcibly))); - deletePortFiles(); + execute(() -> nodes.parallelStream().forEach(n -> n.stop(forcibly))); + execute(() -> nodes.parallelStream().forEach(Node::deletePortsFiles)); } else { // Make sure the process is stopped, otherwise wait - execute(() -> nodes.forEach(n -> n.waitForExit())); + execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); } } @@ -128,6 +129,26 @@ public String getTransportEndpoint(int index) { return getTransportEndpoints().split(",")[index]; } + @Override + public void upgradeNodeToVersion(int index, Version version) { + Node node = nodes.get(index); + node.stop(false); + LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); + node.deletePortsFiles(); + node.start(version); + waitUntilReady(); + } + + @Override + public void upgradeToVersion(Version version) { + stop(false); + if (started.getAndSet(true) == false) { + LOGGER.info("Upgrading Elasticsearch test cluster '{}' to version {}", name, version); + execute(() -> nodes.parallelStream().forEach(n -> n.start(version))); + } + waitUntilReady(); + } + private void waitUntilReady() { writeUnicastHostsFile(); try { @@ -191,7 +212,7 @@ private boolean isSecurityAutoConfigured(Node node) { private void writeUnicastHostsFile() { String transportUris = execute(() -> nodes.parallelStream().map(Node::getTransportEndpoint).collect(Collectors.joining("\n"))); - nodes.forEach(node -> { + execute(() -> nodes.parallelStream().forEach(node -> { try { Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); if (Files.notExists(hostsFile)) { @@ -200,23 +221,7 @@ 
private void writeUnicastHostsFile() { } catch (IOException e) { throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); } - }); - } - - private void deletePortFiles() { - nodes.forEach(node -> { - try { - Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); - Path httpPortsFile = node.getWorkingDir().resolve("logs").resolve("http.ports"); - Path tranportPortsFile = node.getWorkingDir().resolve("logs").resolve("transport.ports"); - - Files.deleteIfExists(hostsFile); - Files.deleteIfExists(httpPortsFile); - Files.deleteIfExists(tranportPortsFile); - } catch (IOException e) { - throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); - } - }); + })); } private T execute(Callable task) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index da409a4935abe..52ff95920b4fd 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -69,7 +69,6 @@ void validate() { public static class LocalNodeSpec { private final LocalClusterSpec cluster; private final String name; - private final Version version; private final List settingsProviders; private final Map settings; private final List environmentProviders; @@ -79,7 +78,9 @@ public static class LocalNodeSpec { private final DistributionType distributionType; private final Set features; private final Map keystoreSettings; + private final Map keystoreFiles; private final Map extraConfigFiles; + private Version version; public LocalNodeSpec( LocalClusterSpec cluster, @@ -94,6 +95,7 @@ public LocalNodeSpec( DistributionType distributionType, Set features, Map keystoreSettings, + Map keystoreFiles, Map extraConfigFiles ) { this.cluster = cluster; @@ -108,9 
+110,14 @@ public LocalNodeSpec( this.distributionType = distributionType; this.features = features; this.keystoreSettings = keystoreSettings; + this.keystoreFiles = keystoreFiles; this.extraConfigFiles = extraConfigFiles; } + void setVersion(Version version) { + this.version = version; + } + public LocalClusterSpec getCluster() { return cluster; } @@ -151,6 +158,10 @@ public Map getKeystoreSettings() { return keystoreSettings; } + public Map getKeystoreFiles() { + return keystoreFiles; + } + public Map getExtraConfigFiles() { return extraConfigFiles; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java index c07a491d2ace6..1f4086fd47fe8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Consumer; +import java.util.function.Supplier; public interface LocalClusterSpecBuilder extends LocalSpecBuilder { /** @@ -19,8 +20,18 @@ public interface LocalClusterSpecBuilder extends LocalSpecBuilder configProvider); + /** * Sets the number of nodes for the cluster. 
*/ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index 54d541cd07144..dc532dfd956bb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -10,18 +10,21 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.cluster.util.Version; import org.junit.runner.Description; import org.junit.runners.model.Statement; import java.nio.file.Path; public class LocalElasticsearchCluster implements ElasticsearchCluster { - private final LocalClusterSpec spec; + private final DefaultLocalClusterSpecBuilder builder; + private LocalClusterSpec spec; private LocalClusterHandle handle; - public LocalElasticsearchCluster(LocalClusterSpec spec) { - this.spec = spec; + public LocalElasticsearchCluster(DefaultLocalClusterSpecBuilder builder) { + this.builder = builder; } @Override @@ -30,9 +33,10 @@ public Statement apply(Statement base, Description description) { @Override public void evaluate() throws Throwable { try { + spec = builder.buildClusterSpec(); handle = new LocalClusterFactory( Path.of(System.getProperty("java.io.tmpdir")).resolve(description.getDisplayName()).toAbsolutePath(), - new LocalDistributionResolver(new SnapshotDistributionResolver()) + new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) ).create(spec); handle.start(); base.evaluate(); @@ -97,6 +101,24 @@ public String getTransportEndpoint(int 
index) { return handle.getTransportEndpoint(index); } + @Override + public void upgradeNodeToVersion(int index, Version version) { + checkHandle(); + handle.upgradeNodeToVersion(index, version); + } + + @Override + public void upgradeToVersion(Version version) { + checkHandle(); + handle.upgradeToVersion(version); + } + + @Override + public Version getVersion() { + checkHandle(); + return spec.getNodes().get(0).getVersion(); + } + private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. Did you forget the @ClassRule annotation?"); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index 0b73c0737c440..2bd8831521ddb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Supplier; @@ -68,8 +69,18 @@ interface LocalSpecBuilder> { */ T keystore(String key, String value); + /** + * Adds a secure file to the node keystore. + */ + T keystore(String key, Resource file); + /** * Adds a file to the node config directory */ T configFile(String fileName, Resource configFile); + + /** + * Sets the version of Elasticsearch. Defaults to {@link Version#CURRENT}. 
+ */ + T version(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java index 5c9f45cbe092f..b9442b28e1591 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java @@ -13,6 +13,9 @@ import java.nio.file.Files; import java.nio.file.Path; +/** + * A {@link DistributionResolver} for resolving locally built distributions for the current version of Elasticsearch. + */ public class LocalDistributionResolver implements DistributionResolver { private final DistributionResolver delegate; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java new file mode 100644 index 0000000000000..12654be310ef8 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.cluster.local.distribution; + +import org.elasticsearch.test.cluster.util.Version; + +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving previously released distributions of Elasticsearch. 
+ */ +public class ReleasedDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; + + @Override + public DistributionDescriptor resolve(Version version, DistributionType type) { + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath == null) { + String taskPath = System.getProperty("tests.task"); + String project = taskPath.substring(0, taskPath.lastIndexOf(':')); + String taskName = taskPath.substring(taskPath.lastIndexOf(':') + 1); + + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Ensure you've added the following to the build script for project '" + + project + + "':\n\n" + + "tasks.named('" + + taskName + + "') {\n" + + " usesBwcDistribution(" + + version + + ")\n" + + "}" + ); + } + + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." 
+ ); + } + + return new DefaultDistributionDescriptor(version, false, distributionDir, DistributionType.DEFAULT); + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java index 182dbe66a584d..c6cecf09e9b9d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java @@ -10,10 +10,36 @@ import org.elasticsearch.test.cluster.util.Version; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving snapshot versions of Elasticsearch for previous, backwards-compatible versions. + */ public class SnapshotDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private final DistributionResolver delegate; + + public SnapshotDistributionResolver(DistributionResolver delegate) { + this.delegate = delegate; + } + @Override public DistributionDescriptor resolve(Version version, DistributionType type) { - // Not yet implemented - throw new UnsupportedOperationException("Cannot resolve distribution for version " + version); + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath != null) { + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." 
+ ); + } + + // Snapshot distributions are never release builds and always use the default distribution + return new DefaultDistributionDescriptor(version, true, distributionDir, DistributionType.DEFAULT); + } + + return delegate.resolve(version, type); } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle index 429b29bbc9fdb..d9539bb668b4b 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle @@ -1,104 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', 
file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 66% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index df6e3ed6b9388..69684377df662 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,10 +15,16 @@ import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -33,7 +41,37 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + 
.configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } @Override protected Settings restClientSettings() { @@ -64,7 +102,7 @@ public void testNodeShutdown() throws Exception { // Use the types available from as early as possible final String type = randomFrom("restart", "remove"); putBody.field("type", type); - putBody.field("reason", this.getTestName()); + putBody.field("reason", getRootTestName()); } putBody.endObject(); putShutdownRequest.setJsonEntity(Strings.toString(putBody)); @@ -81,7 +119,7 @@ public void testNodeShutdown() throws Exception { assertThat("there should be exactly one shutdown registered", shutdowns, hasSize(1)); final Map shutdown = shutdowns.get(0); assertThat(shutdown.get("node_id"), notNullValue()); // Since we randomly determine the node ID, we can't check it - assertThat(shutdown.get("reason"), equalTo(this.getTestName())); + assertThat(shutdown.get("reason"), equalTo(getRootTestName())); assertThat( (String) shutdown.get("status"), anyOf( diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 
3923d439d394d..d9539bb668b4b 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -1,110 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. // Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), 
BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - 
baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. - baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..10486c914d470 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; +import org.junit.ClassRule; + +public abstract class AbstractXpackFullClusterRestartTestCase extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public AbstractXpackFullClusterRestartTestCase(FullClustRestartUpgradeStatus upgradeStatus) { + 
super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } +} diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java new file mode 100644 index 0000000000000..cc202cd71569d --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.restart; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartIT; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class CoreFullClusterRestartIT extends FullClusterRestartIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", 
Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public CoreFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 99% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 42c551b16655b..b15f6a08ef2c4 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; @@ -25,7 +27,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; 
import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -61,11 +63,15 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; + public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java similarity index 97% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index a2b61b55a2975..00e253c43cbc8 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -15,7 +17,7 @@ import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -32,7 +34,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created static final String BASE_64_ENCODED_MODEL = @@ -63,6 +65,10 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRe RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } + public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Before public void setLogging() throws IOException { Request loggingSettings = new Request("PUT", "_cluster/settings"); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java similarity index 94% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index bfc078ffe9206..da3a00574f4a3 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -29,11 +31,15 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; + public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java similarity index 96% rename 
from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index ff15b6a428182..509f1b1cf72cc 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -16,7 +18,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -38,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String JOB_ID = "ml-hidden-indices-old-cluster-job"; private static final List, String>> EXPECTED_INDEX_ALIAS_PAIRS = List.of( @@ -49,6 +51,10 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRest Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); + public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") 
FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 61ce6f7827e2a..726e3ab559818 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -17,7 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -35,13 +37,17 @@ import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.is; -public 
class MlMigrationFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_OPEN_JOB_ID = "migration-old-cluster-open-job"; private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = "migration-old-cluster-started-datafeed"; private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; + public MlMigrationFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java new file mode 100644 index 0000000000000..5a2268626864e --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public QueryBuilderBWCIT(FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java similarity index 92% rename from 
x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index ea926e964360d..e6f8a89f08c1b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -14,7 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -23,7 +25,11 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; -public class WatcherMappingUpdateIT extends AbstractFullClusterRestartTestCase { +public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { + + public WatcherMappingUpdateIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json rename to 
x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git 
a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java deleted file mode 100644 index e06cb12f747a7..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClusterRestartIT; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class CoreFullClusterRestartIT extends FullClusterRestartIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java deleted file mode 100644 index cffc6881df645..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } -} From 0f64f844c72d0b9e7a3b07069ed73cddd6011aa2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 12:10:39 -0800 Subject: [PATCH 02/63] Fix NPE --- .../test/cluster/local/DefaultLocalClusterSpecBuilder.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 8d8ae010c552f..271b0d3b5e36d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -166,7 +166,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Optional.of(getVersion()).orElse(Version.CURRENT), + Optional.ofNullable(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), getEnvironmentProviders(), From 4b774537310528a6ca8b42a478b7509999434c56 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:02:18 -0800 Subject: [PATCH 03/63] Wait for cluster ready --- .../test/cluster/local/LocalClusterHandle.java | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 62ba9113d47c1..dfea1846bb365 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -22,6 +22,7 @@ import java.net.MalformedURLException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.GeneralSecurityException; import java.time.Duration; import java.util.List; import java.util.concurrent.Callable; @@ -152,13 +153,9 @@ public void upgradeToVersion(Version version) { private void waitUntilReady() { writeUnicastHostsFile(); try { - Retry.retryUntilTrue(CLUSTER_UP_TIMEOUT, Duration.ZERO, () -> { - WaitForHttpResource wait = configureWaitForReady(); - return wait.wait(500); - }); - } catch (TimeoutException e) { - throw new RuntimeException("Timed out after " + CLUSTER_UP_TIMEOUT + " waiting for cluster '" + name + "' status to be yellow"); - } catch (ExecutionException e) { + WaitForHttpResource wait = configureWaitForReady(); + wait.wait(CLUSTER_UP_TIMEOUT.toMillis()); + } catch (Exception e) { throw new RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } } From b6b5b6fe8fa26db51bb814d8c6f263fa900b5d81 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:04:10 -0800 Subject: [PATCH 04/63] Avoid clashing with Object#wait --- .../elasticsearch/test/cluster/local/LocalClusterHandle.java | 5 +---- .../test/cluster/local/WaitForHttpResource.java | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index dfea1846bb365..5d851d61f49e8 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -14,7 +14,6 @@ import org.elasticsearch.test.cluster.local.LocalClusterFactory.Node; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; -import org.elasticsearch.test.cluster.util.Retry; import org.elasticsearch.test.cluster.util.Version; import java.io.IOException; @@ -22,7 +21,6 @@ import java.net.MalformedURLException; import java.nio.file.Files; import java.nio.file.Path; -import java.security.GeneralSecurityException; import java.time.Duration; import java.util.List; import java.util.concurrent.Callable; @@ -30,7 +28,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinWorkerThread; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -154,7 +151,7 @@ private void waitUntilReady() { writeUnicastHostsFile(); try { WaitForHttpResource wait = configureWaitForReady(); - wait.wait(CLUSTER_UP_TIMEOUT.toMillis()); + wait.waitFor(CLUSTER_UP_TIMEOUT.toMillis()); } catch (Exception e) { throw new RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java index edab2cdf1e7e9..f00e6f13cb314 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java @@ -90,7 +90,7 @@ public void setPassword(String password) { this.password = password; } - 
public boolean wait(int durationInMs) throws GeneralSecurityException, InterruptedException, IOException { + public boolean waitFor(long durationInMs) throws GeneralSecurityException, InterruptedException, IOException { final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); final long sleep = Long.max(durationInMs / 10, 100); From 665227517319d1f8e65f27806c5d504c516e5c94 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:19:30 -0800 Subject: [PATCH 05/63] Skip test when non-applicable --- .../elasticsearch/xpack/restart/FullClusterRestartIT.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 69684377df662..e26e919913f6e 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.BeforeClass; import org.junit.ClassRule; import java.io.IOException; @@ -85,10 +86,13 @@ protected Settings restClientSettings() { .build(); } - @SuppressWarnings("unchecked") - public void testNodeShutdown() throws Exception { + @BeforeClass + public static void checkClusterVersion() { assumeTrue("no shutdown in versions before " + Version.V_7_15_0, getOldClusterVersion().onOrAfter(Version.V_7_15_0)); + } + @SuppressWarnings("unchecked") + public void testNodeShutdown() throws Exception { if (isRunningAgainstOldCluster()) { final 
Request getNodesReq = new Request("GET", "_nodes"); final Response getNodesResp = adminClient().performRequest(getNodesReq); From d1240a275ed7e19edf927a004409d714c1163669 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 13:59:55 -0800 Subject: [PATCH 06/63] Disable parallel test execution temporarily --- .../gradle/internal/test/rest/RestTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 1a7b5bc3ee2a1..fa47e29b555d7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -120,7 +120,7 @@ public void apply(Project project) { registerDistributionInputs(task, integTestDistro); // Enable parallel execution for these tests since each test gets its own cluster - task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); + // task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); // Disable test failure reporting since this stuff is now captured in build scans task.getExtensions().getExtraProperties().set("dumpOutputOnFailure", false); From 45eadbb344b051f03eefbddaacd3fe8720263f4a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 15:01:05 -0800 Subject: [PATCH 07/63] Revert "Disable parallel test execution temporarily" This reverts commit d1240a275ed7e19edf927a004409d714c1163669. 
--- .../gradle/internal/test/rest/RestTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index fa47e29b555d7..1a7b5bc3ee2a1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -120,7 +120,7 @@ public void apply(Project project) { registerDistributionInputs(task, integTestDistro); // Enable parallel execution for these tests since each test gets its own cluster - // task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); + task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); // Disable test failure reporting since this stuff is now captured in build scans task.getExtensions().getExtraProperties().set("dumpOutputOnFailure", false); From 21d0ed84ff247742d48086c9a8a0b738ea0b72c9 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 18 Jan 2023 15:04:09 -0800 Subject: [PATCH 08/63] Reset upgrade failure status after test class --- .../upgrades/ParameterizedFullClusterRestartTestCase.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index 29e1ae3e92255..9d3b8e08c52d8 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ 
-15,6 +15,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; import org.junit.Before; import java.util.Arrays; @@ -60,6 +61,11 @@ public void maybeUpgrade() throws Exception { assumeFalse("Cluster upgrade failed", upgradeFailed); } + @AfterClass + public static void resetUpgrade() { + upgradeFailed = false; + } + public boolean isRunningAgainstOldCluster() { return requestedUpgradeStatus == OLD; } From ae0f99c08eb7b4514e23904f8c3728c16ff6d7de Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 08:14:03 -0800 Subject: [PATCH 09/63] Fix test --- .../elasticsearch/xpack/restart/WatcherMappingUpdateIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index e6f8a89f08c1b..8eb16e47fbdbb 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningFailureException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; @@ -97,7 +98,7 @@ private void assertNoMappingVersion(String index) throws Exception { private RequestOptions.Builder getWarningHandlerOptions(String index) { return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(w -> 
w.contains(getWatcherSystemIndexWarning(index)) == false || w.size() != 1); + .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false); } private String getWatcherSystemIndexWarning(String index) { From c271e6e2251a1660cba281910d3ce980443017a1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 08:19:48 -0800 Subject: [PATCH 10/63] Remove unused import --- .../org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index 8eb16e47fbdbb..b096356c1c3dd 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.WarningFailureException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; From aa8a3bc7d2d09336a35006378f3756165eaa92fb Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 14:28:25 -0800 Subject: [PATCH 11/63] Fixes --- .../elasticsearch/test/cluster/local/LocalClusterFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 49356f28d6ea6..2bd2360b4bb18 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -109,7 +109,6 @@ public synchronized void start(Version version) { distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); initializeWorkingDirectory(currentVersion != null); - initializeWorkingDirectory(); copyExtraJarFiles(); installPlugins(); if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { @@ -370,6 +369,7 @@ private void addKeystoreFiles() { file.writeTo(path); ProcessUtils.exec( + spec.getKeystorePassword(), workingDir, OS.conditional( c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) From afc66a8f97aa54cff94178aafcd39134367e653b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 19 Jan 2023 15:40:49 -0800 Subject: [PATCH 12/63] Warning might not always happen --- .../xpack/restart/FullClusterRestartIT.java | 41 ++++++++----------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index b15f6a08ef2c4..ef862739b2ee0 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -109,12 +109,7 @@ public void testSecurityNativeRealm() throws Exception { } else { waitForYellow(".security"); final Request getSettingsRequest = new Request("GET", "/.security/_settings/index.format"); - getSettingsRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - 
+ "version, direct access to system indices will be prevented by default" - ) - ); + getSettingsRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); Response settingsResponse = client().performRequest(getSettingsRequest); Map settingsResponseMap = entityAsMap(settingsResponse); logger.info("settings response map {}", settingsResponseMap); @@ -396,12 +391,7 @@ public void testApiKeySuperuser() throws IOException { "doc_type": "foo" }"""); if (getOldClusterVersion().onOrAfter(Version.V_7_10_0)) { - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); } else { indexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", apiKeyAuthHeader)); } @@ -415,12 +405,7 @@ public void testApiKeySuperuser() throws IOException { // read is ok final Request searchRequest = new Request("GET", ".security/_search"); - searchRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + searchRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); assertOK(client().performRequest(searchRequest)); // write must not be allowed @@ -429,12 +414,7 @@ public void testApiKeySuperuser() throws IOException { { "doc_type": "foo" }"""); - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", 
apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(indexRequest)); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); assertThat(e.getMessage(), containsString("is unauthorized")); @@ -1001,4 +981,17 @@ private static void createComposableTemplate(RestClient client, String templateN createIndexTemplateRequest.setEntity(templateJSON); client.performRequest(createIndexTemplateRequest); } + + private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { + return RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler( + w -> w.size() > 0 + && w.contains( + "this request accesses system indices: [" + + index + + "], but in a future major " + + "version, direct access to system indices will be prevented by default" + ) == false + ); + } } From 8d4bd49c3f76b64332091330322e90ca29f42b61 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 26 Jan 2023 14:30:20 -0800 Subject: [PATCH 13/63] Fix compilation error --- .../org/elasticsearch/test/cluster/local/LocalClusterSpec.java | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 830e00b401f67..2234b037381a8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -265,6 +265,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider) { n.distributionType, n.features, n.keystoreSettings, + n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, n.systemProperties From b8d6eaab2e64b00c8ac7afa3beffe72399db81b9 Mon Sep 17 00:00:00 2001 From: Mark 
Vieira Date: Thu, 26 Jan 2023 15:38:49 -0800 Subject: [PATCH 14/63] Avoid unnecessary restarts when testing current version --- .../upgrades/ParameterizedFullClusterRestartTestCase.java | 6 +++++- .../elasticsearch/test/cluster/ElasticsearchCluster.java | 3 --- .../test/cluster/local/LocalElasticsearchCluster.java | 6 ------ 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index 9d3b8e08c52d8..a4694e1261170 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -29,6 +29,7 @@ public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTest private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); private static boolean upgradeFailed = false; + private static boolean upgraded = false; private final FullClustRestartUpgradeStatus requestedUpgradeStatus; public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { @@ -42,7 +43,7 @@ public static Iterable parameters() throws Exception { @Before public void maybeUpgrade() throws Exception { - if (getUpgradeCluster().getVersion().equals(OLD_CLUSTER_VERSION) && requestedUpgradeStatus == UPGRADED) { + if (upgraded == false && requestedUpgradeStatus == UPGRADED) { try { if (OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { // First upgrade to latest wire compatible version @@ -54,6 +55,8 @@ public void maybeUpgrade() throws Exception { } catch 
(Exception e) { upgradeFailed = true; throw e; + } finally { + upgraded = true; } } @@ -63,6 +66,7 @@ public void maybeUpgrade() throws Exception { @AfterClass public static void resetUpgrade() { + upgraded = false; upgradeFailed = false; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java index 3bc4efaeb032f..02eb3fb73df63 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ElasticsearchCluster.java @@ -10,7 +10,6 @@ import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; import org.elasticsearch.test.cluster.local.LocalClusterSpecBuilder; -import org.elasticsearch.test.cluster.util.Version; import org.junit.rules.TestRule; /** @@ -33,6 +32,4 @@ static LocalClusterSpecBuilder local() { return new DefaultLocalClusterSpecBuilder(); } - Version getVersion(); - } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index dc532dfd956bb..9a5e5666f5e9a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -113,12 +113,6 @@ public void upgradeToVersion(Version version) { handle.upgradeToVersion(version); } - @Override - public Version getVersion() { - checkHandle(); - return spec.getNodes().get(0).getVersion(); - } - private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. 
Did you forget the @ClassRule annotation?"); From a14906c42e4d05e22e0dbdfd55d51d206f97d4dc Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:47:45 -0800 Subject: [PATCH 15/63] Review feedback --- .../elasticsearch/upgrades/FullClusterRestartIT.java | 2 +- .../upgrades/FullClusterRestartTestOrdering.java | 2 +- ...tatus.java => FullClusterRestartUpgradeStatus.java} | 2 +- .../ParameterizedFullClusterRestartTestCase.java | 10 +++++----- .../org/elasticsearch/upgrades/QueryBuilderBWCIT.java | 2 +- .../test/cluster/local/LocalClusterFactory.java | 8 ++++---- .../xpack/restart/FullClusterRestartIT.java | 4 ++-- .../AbstractXpackFullClusterRestartTestCase.java | 4 ++-- .../xpack/restart/CoreFullClusterRestartIT.java | 4 ++-- .../xpack/restart/FullClusterRestartIT.java | 4 ++-- .../restart/MLModelDeploymentFullClusterRestartIT.java | 4 ++-- .../MlConfigIndexMappingsFullClusterRestartIT.java | 4 ++-- .../restart/MlHiddenIndicesFullClusterRestartIT.java | 4 ++-- .../xpack/restart/MlMigrationFullClusterRestartIT.java | 4 ++-- .../elasticsearch/xpack/restart/QueryBuilderBWCIT.java | 4 ++-- .../xpack/restart/WatcherMappingUpdateIT.java | 4 ++-- 16 files changed, 33 insertions(+), 33 deletions(-) rename qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/{FullClustRestartUpgradeStatus.java => FullClusterRestartUpgradeStatus.java} (90%) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index e250a945aa903..3f9a007e6bf4e 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -112,7 +112,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas private String index; - public 
FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java index 9f5c57346b945..232619ee93bb9 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -19,6 +19,6 @@ public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { } private int getOrdinal(TestMethodAndParams t) { - return ((FullClustRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + return ((FullClusterRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java similarity index 90% rename from qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java index dda196ddafc20..06048d020e2a0 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClustRestartUpgradeStatus.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java @@ -8,7 +8,7 @@ package org.elasticsearch.upgrades; -public enum FullClustRestartUpgradeStatus { +public enum FullClusterRestartUpgradeStatus { OLD, UPGRADED } diff --git 
a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index a4694e1261170..a064c87743800 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -21,8 +21,8 @@ import java.util.Arrays; import java.util.Locale; -import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.OLD; -import static org.elasticsearch.upgrades.FullClustRestartUpgradeStatus.UPGRADED; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; @TestCaseOrdering(FullClusterRestartTestOrdering.class) public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { @@ -30,15 +30,15 @@ public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTest private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); private static boolean upgradeFailed = false; private static boolean upgraded = false; - private final FullClustRestartUpgradeStatus requestedUpgradeStatus; + private final FullClusterRestartUpgradeStatus requestedUpgradeStatus; - public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { this.requestedUpgradeStatus = upgradeStatus; } @ParametersFactory public static Iterable parameters() throws Exception { - return Arrays.stream(FullClustRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + return 
Arrays.stream(FullClusterRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); } @Before diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 91607dec6f721..1636644409fc7 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -79,7 +79,7 @@ protected ElasticsearchCluster getUpgradeCluster() { return cluster; } - public QueryBuilderBWCIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public QueryBuilderBWCIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 0d7340166a2a0..34e1f7285c6b0 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -213,12 +213,12 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory(boolean preserverWorkingDirectory) { + private void initializeWorkingDirectory(boolean preserveWorkingDirectory) { try { - if (preserverWorkingDirectory == false) { - IOUtils.deleteWithRetry(workingDir); - } else { + if (preserveWorkingDirectory) { IOUtils.deleteWithRetry(distributionDir); + } else { + IOUtils.deleteWithRetry(workingDir); } try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java 
b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index e26e919913f6e..07ed594770649 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -65,7 +65,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas .feature(FeatureFlag.TIME_SERIES_MODE) .build(); - public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java index 10486c914d470..0bc9101301a54 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import 
org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.junit.ClassRule; @@ -38,7 +38,7 @@ public abstract class AbstractXpackFullClusterRestartTestCase extends Parameteri .feature(FeatureFlag.TIME_SERIES_MODE) .build(); - public AbstractXpackFullClusterRestartTestCase(FullClustRestartUpgradeStatus upgradeStatus) { + public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java index cc202cd71569d..65a8b0b475f29 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.FullClusterRestartIT; import java.nio.charset.StandardCharsets; @@ -33,7 +33,7 @@ public class CoreFullClusterRestartIT extends FullClusterRestartIT { .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); } - public CoreFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public 
CoreFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index ef862739b2ee0..ab48825ed983a 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -68,7 +68,7 @@ public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCas public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; - public FullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 00e253c43cbc8..5a9e28274b84e 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -65,7 +65,7 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClus RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } - public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index da3a00574f4a3..e4ab3957f2627 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import 
org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -36,7 +36,7 @@ public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFull private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; - public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index 86d3e2239a36e..aeb3dad547946 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -51,7 +51,7 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullCluste Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); - public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public 
MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 726e3ab559818..2bbda9123ae34 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -44,7 +44,7 @@ public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRes private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; - public MlMigrationFullClusterRestartIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public MlMigrationFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java 
index 5a2268626864e..563cde322b725 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -30,7 +30,7 @@ public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBW .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); } - public QueryBuilderBWCIT(FullClustRestartUpgradeStatus upgradeStatus) { + public QueryBuilderBWCIT(FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index b096356c1c3dd..043b3f49a8825 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -16,7 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClustRestartUpgradeStatus; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -27,7 +27,7 @@ public class WatcherMappingUpdateIT extends 
AbstractXpackFullClusterRestartTestCase { - public WatcherMappingUpdateIT(@Name("cluster") FullClustRestartUpgradeStatus upgradeStatus) { + public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { super(upgradeStatus); } From 7669d79cabc92b620658dc72da58e7219323dafa Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:50:45 -0800 Subject: [PATCH 16/63] Always call deletePortsFiles when stopping a node --- .../elasticsearch/test/cluster/local/LocalClusterFactory.java | 1 + .../elasticsearch/test/cluster/local/LocalClusterHandle.java | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 34e1f7285c6b0..24cebbeb7168f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -138,6 +138,7 @@ public synchronized void stop(boolean forcibly) { ProcessUtils.stopHandle(process.toHandle(), forcibly); ProcessReaper.instance().unregister(getServiceName()); } + deletePortsFiles(); } public void waitForExit() { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 5d851d61f49e8..6ad2709957299 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -75,7 +75,6 @@ public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); execute(() -> nodes.parallelStream().forEach(n -> 
n.stop(forcibly))); - execute(() -> nodes.parallelStream().forEach(Node::deletePortsFiles)); } else { // Make sure the process is stopped, otherwise wait execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); @@ -132,7 +131,6 @@ public void upgradeNodeToVersion(int index, Version version) { Node node = nodes.get(index); node.stop(false); LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); - node.deletePortsFiles(); node.start(version); waitUntilReady(); } From 999fefedc3ff437c614e69dba74558f243f207e2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 27 Jan 2023 09:57:57 -0800 Subject: [PATCH 17/63] Spotless --- .../elasticsearch/xpack/restart/CoreFullClusterRestartIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java index 65a8b0b475f29..dcdc127079637 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.upgrades.FullClusterRestartIT; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import java.nio.charset.StandardCharsets; import java.util.Base64; From b12398e230b02051e719d25e7105e9ec0252daa3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 30 Jan 2023 18:24:55 +0000 Subject: [PATCH 18/63] Fix some possible NPEs in strange JVM configs (#93352) `JvmErgonomics` requires 
various JVM options to be present, but if they are omitted then we throw a `NullPointerException` which looks to the user like an ES bug. They would have to be doing something a little odd to get into this state, but nonetheless it is possible to hit these NPEs. We don't need to handle such a config gracefully, but we should clarify why Elasticsearch won't start to help the user fix their config. --- .../server/cli/JvmErgonomics.java | 42 ++++++++++++------ .../elasticsearch/server/cli/JvmOption.java | 7 +++ .../server/cli/JvmErgonomicsTests.java | 43 +++++++++++++++++++ 3 files changed, 78 insertions(+), 14 deletions(-) diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java index 46e3da3ced90b..926d5727a1b4a 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java @@ -64,28 +64,42 @@ static boolean tuneG1GCForSmallHeap(final long heapSize) { } static boolean tuneG1GCHeapRegion(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GCHeapRegion = finalJvmOptions.get("G1HeapRegionSize"); - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - return (tuneG1GCForSmallHeap && g1GC.getMandatoryValue().equals("true") && g1GCHeapRegion.isCommandLineOrigin() == false); + return tuneG1GCForSmallHeap && usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1HeapRegionSize"); } static int tuneG1GCReservePercent(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCReservePercent = finalJvmOptions.get("G1ReservePercent"); - if (g1GC.getMandatoryValue().equals("true")) { - if (g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap) { - return 15; - } else if 
(g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap == false) { - return 25; - } + if (usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1ReservePercent")) { + return tuneG1GCForSmallHeap ? 15 : 25; } return 0; } static boolean tuneG1GCInitiatingHeapOccupancyPercent(final Map finalJvmOptions) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCInitiatingHeapOccupancyPercent = finalJvmOptions.get("InitiatingHeapOccupancyPercent"); - return g1GCInitiatingHeapOccupancyPercent.isCommandLineOrigin() == false && g1GC.getMandatoryValue().equals("true"); + return usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "InitiatingHeapOccupancyPercent"); + } + + /** + * @return
    + *
  • {@code true} if `-XX:+UseG1GC` is in the final JVM options and {@code optionName} was not specified. + *
  • {@code false} if either `-XX:-UseG1GC` is in the final JVM options, or {@code optionName} was specified. + *
+ * + * @throws IllegalStateException if neither `-XX:+UseG1GC` nor `-XX:-UseG1GC` is in the final JVM options, or `-XX:+UseG1GC` is selected + * and {@code optionName} is not in the final JVM options. + */ + private static boolean usingG1GcWithoutCommandLineOriginOption(Map finalJvmOptions, String optionName) { + return getRequiredOption(finalJvmOptions, "UseG1GC").getMandatoryValue().equals("true") + && getRequiredOption(finalJvmOptions, optionName).isCommandLineOrigin() == false; + } + + private static JvmOption getRequiredOption(final Map finalJvmOptions, final String key) { + final var jvmOption = finalJvmOptions.get(key); + if (jvmOption == null) { + throw new IllegalStateException( + "JVM option [" + key + "] was unexpectedly missing. Elasticsearch requires this option to be present." + ); + } + return jvmOption; } private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?[\\w+].*?)=(?.*)$"); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java index 39bf2e54dade0..60cbcb86c02b9 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java @@ -8,6 +8,8 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.Strings; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -29,6 +31,11 @@ class JvmOption { private final String origin; JvmOption(String value, String origin) { + if (origin == null) { + throw new IllegalStateException(Strings.format(""" + Elasticsearch could not determine the origin of JVM option [%s]. 
\ + This indicates that it is running in an unsupported configuration.""", value)); + } this.value = value; this.origin = origin; } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java index f68a51de85c2a..0d4edfc384d46 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -179,4 +180,46 @@ public void testMaxDirectMemorySizeChoiceWhenSet() throws Exception { ); } + @SuppressWarnings("ConstantConditions") + public void testMissingOptionHandling() { + final Map g1GcOn = Map.of("UseG1GC", new JvmOption("true", "")); + final Map g1GcOff = Map.of("UseG1GC", new JvmOption("", "")); + + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(Map.of(), false)); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(Map.of(), true)).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(g1GcOn, true)).getMessage(), + allOf(containsString("[G1HeapRegionSize]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(Map.of(), randomBoolean())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + 
expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(g1GcOn, randomBoolean())).getMessage(), + allOf(containsString("[G1ReservePercent]"), containsString("unexpectedly missing")) + ); + assertEquals(0, JvmErgonomics.tuneG1GCReservePercent(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(Map.of())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOn)).getMessage(), + allOf(containsString("[InitiatingHeapOccupancyPercent]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOff)); + + assertThat( + expectThrows(IllegalStateException.class, () -> new JvmOption("OptionName", null)).getMessage(), + allOf(containsString("could not determine the origin of JVM option [OptionName]"), containsString("unsupported")) + ); + } + } From b0cc6422bfefa0e39d7fd09e20258e4f6ef95e35 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 30 Jan 2023 13:07:48 -0600 Subject: [PATCH 19/63] Download the geoip databases only when needed (#92335) This commit changes the geoip downloader so that we only download the geoip databases if you have at least one geoip processor in your cluster, or when you add a new geoip processor (or if `ingest.geoip.downloader.eager.download` is explicitly set to true). 
--- docs/changelog/92335.yaml | 6 + .../ingest/processors/geoip.asciidoc | 12 +- .../ingest/geoip/GeoIpDownloaderIT.java | 138 +++++++++- .../ingest/geoip/GeoIpDownloaderStatsIT.java | 32 ++- .../ingest/geoip/GeoIpDownloaderTaskIT.java | 2 +- .../ingest/geoip/GeoIpDownloader.java | 77 ++++-- .../geoip/GeoIpDownloaderTaskExecutor.java | 127 ++++++++- .../ingest/geoip/IngestGeoIpPlugin.java | 6 +- .../GeoIpDownloaderTaskExecutorTests.java | 253 ++++++++++++++++++ .../ingest/geoip/GeoIpDownloaderTests.java | 47 +++- .../IngestGeoIpClientYamlTestSuiteIT.java | 40 +++ .../upgrades/GeoIpUpgradeIT.java | 4 +- 12 files changed, 684 insertions(+), 60 deletions(-) create mode 100644 docs/changelog/92335.yaml create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java diff --git a/docs/changelog/92335.yaml b/docs/changelog/92335.yaml new file mode 100644 index 0000000000000..9dc21fdcdc511 --- /dev/null +++ b/docs/changelog/92335.yaml @@ -0,0 +1,6 @@ +pr: 92335 +summary: Download the geoip databases only when needed +area: Ingest Node +type: bug +issues: + - 90673 diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index fa31a3bbe2543..d39f3be82d2b0 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -11,7 +11,10 @@ IPv4 or IPv6 address. By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 ASN GeoIP2 databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the -CC BY-SA 4.0 license. {es} automatically downloads updates for +CC BY-SA 4.0 license. It automatically downloads these databases if either +`ingest.geoip.downloader.eager.download` is set to true, or your cluster +has at least one pipeline with a `geoip` processor. 
{es} +automatically downloads updates for these databases from the Elastic GeoIP endpoint: https://geoip.elastic.co/v1/database. To get download statistics for these updates, use the <>. @@ -412,6 +415,13 @@ If `true`, {es} automatically downloads and manages updates for GeoIP2 databases from the `ingest.geoip.downloader.endpoint`. If `false`, {es} does not download updates and deletes all downloaded databases. Defaults to `true`. +[[ingest-geoip-downloader-eager-download]] +(<>, Boolean) +If `true`, {es} downloads GeoIP2 databases immediately, regardless of whether a +pipeline exists with a geoip processor. If `false`, {es} only begins downloading +the databases if a pipeline with a geoip processor exists or is added. Defaults +to `false`. + [[ingest-geoip-downloader-endpoint]] `ingest.geoip.downloader.endpoint`:: (<>, string) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 0e164cab818b2..f3f37f50147fb 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -27,10 +27,13 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStatsAction; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import 
org.elasticsearch.search.SearchHit; @@ -51,11 +54,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -82,7 +87,12 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, IngestGeoIpPlugin.class, GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class); + return Arrays.asList( + ReindexPlugin.class, + IngestGeoIpPlugin.class, + GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class, + NonGeoProcessorsPlugin.class + ); } @Override @@ -104,7 +114,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + .putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get(); @@ -149,6 +159,7 @@ public void cleanUp() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/75221") public void testInvalidTimestamp() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -160,7 +171,7 @@ public void testInvalidTimestamp() throws Exception { assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); }, 2, TimeUnit.MINUTES); - putPipeline(); + putGeoIpPipeline(); verifyUpdatedDatabase(); settingsResponse = 
client().admin() @@ -172,7 +183,9 @@ public void testInvalidTimestamp() throws Exception { settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); List geoIpTmpDirs = getGeoIpTmpDirs(); @@ -186,7 +199,7 @@ public void testInvalidTimestamp() throws Exception { } } }); - putPipeline(); + putGeoIpPipeline(); assertBusy(() -> { SimulateDocumentBaseResult result = simulatePipeline(); assertThat(result.getFailure(), nullValue()); @@ -221,7 +234,9 @@ public void testUpdatedTimestamp() throws Exception { ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); assertBusy(() -> assertNotEquals(lastCheck, getGeoIpTaskState().getDatabases().get("GeoLite2-ASN.mmdb").lastCheck())); @@ -229,6 +244,7 @@ public void testUpdatedTimestamp() throws Exception { } public void testGeoIpDatabasesDownload() throws Exception { + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -283,12 +299,34 @@ public void testGeoIpDatabasesDownload() throws Exception { } } + public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { + assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + String pipelineId = randomAlphaOfLength(10); + putGeoIpPipeline(pipelineId); + 
ClusterUpdateSettingsResponse settingsResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)) + .get(); + assertTrue(settingsResponse.isAcknowledged()); + assertBusy(() -> { assertNull(getTask().getState()); }); + putNonGeoipPipeline(pipelineId); + assertBusy(() -> { assertNull(getTask().getState()); }); + putNonGeoipPipeline(pipelineId); + assertNull(getTask().getState()); + putGeoIpPipeline(); + assertBusy(() -> { + GeoIpTaskState state = getGeoIpTaskState(); + assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + }, 2, TimeUnit.MINUTES); + } + @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/69972") public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); setupDatabasesInConfigDirectory(); // setup: - putPipeline(); + putGeoIpPipeline(); // verify before updating dbs { @@ -355,7 +393,7 @@ public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/79074") public void testStartWithNoDatabases() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); - putPipeline(); + putGeoIpPipeline(); // Behaviour without any databases loaded: { @@ -438,7 +476,21 @@ private SimulateDocumentBaseResult simulatePipeline() throws IOException { return (SimulateDocumentBaseResult) simulateResponse.getResults().get(0); } - private void putPipeline() throws IOException { + /** + * This creates a pipeline with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is enabled). 
+ * @throws IOException + */ + private void putGeoIpPipeline() throws IOException { + putGeoIpPipeline("_id"); + } + + /** + * This creates a pipeline named pipelineId with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is + * enabled). + * @param pipelineId The name of the new pipeline with a geoip processor + * @throws IOException + */ + private void putGeoIpPipeline(String pipelineId) throws IOException { BytesReference bytes; try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); @@ -484,7 +536,45 @@ private void putPipeline() throws IOException { builder.endObject(); bytes = BytesReference.bytes(builder); } - assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); + } + + /** + * This creates a pipeline named pipelineId that does _not_ have a geoip processor. + * @throws IOException + */ + private void putNonGeoipPipeline(String pipelineId) throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); } private List getGeoIpTmpDirs() throws IOException { @@ -624,4 +714,32 @@ public int 
read(byte[] b, int off, int len) throws IOException { return read; } } + + /** + * This class defines a processor of type "test". + */ + public static final class NonGeoProcessorsPlugin extends Plugin implements IngestPlugin { + public static final String NON_GEO_PROCESSOR_TYPE = "test"; + + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map procMap = new HashMap<>(); + procMap.put(NON_GEO_PROCESSOR_TYPE, (factories, tag, description, config) -> new AbstractProcessor(tag, description) { + @Override + public void execute(IngestDocument ingestDocument, BiConsumer handler) {} + + @Override + public String getType() { + return NON_GEO_PROCESSOR_TYPE; + } + + @Override + public boolean isAsync() { + return false; + } + + }); + return procMap; + } + } } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java index 6076063a38b5c..eea763351dd09 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java @@ -20,6 +20,8 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; import java.io.IOException; @@ -29,6 +31,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +83,7 @@ public void testStats() throws Exception { 
assertThat(jsonMapView.get("stats.databases_count"), equalTo(0)); assertThat(jsonMapView.get("stats.total_download_time"), equalTo(0)); assertEquals(0, jsonMapView.>get("nodes").size()); - + putPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -108,6 +111,33 @@ public void testStats() throws Exception { }); } + private void putPipeline() throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + } + public static Map convertToMap(ToXContent part) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); part.toXContent(builder, EMPTY_PARAMS); diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java index 60be668272b2c..83fde48b39f3d 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java @@ -48,7 +48,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + 
.putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get() diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 6776ab9d629a2..0732674632b34 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -48,6 +47,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; /** * Main component responsible for downloading new GeoIP databases. 
@@ -59,14 +59,6 @@ public class GeoIpDownloader extends AllocatedPersistentTask { private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); - public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( - "ingest.geoip.downloader.poll.interval", - TimeValue.timeValueDays(3), - TimeValue.timeValueDays(1), - Property.Dynamic, - Property.NodeScope - ); - // for overriding in tests private static final String DEFAULT_ENDPOINT = System.getProperty( "ingest.geoip.downloader.endpoint.default", @@ -91,9 +83,16 @@ public class GeoIpDownloader extends AllocatedPersistentTask { // visible for testing protected volatile GeoIpTaskState state; - private volatile TimeValue pollInterval; private volatile Scheduler.ScheduledCancellable scheduled; private volatile GeoIpDownloaderStats stats = GeoIpDownloaderStats.EMPTY; + private final Supplier pollIntervalSupplier; + private final Supplier eagerDownloadSupplier; + /* + * This variable tells us whether we have at least one pipeline with a geoip processor. If there are no geoip processors then we do + * not download geoip databases (unless configured to eagerly download). Access is not protected because it is set in the constructor + * and then only ever updated on the cluster state update thread (it is also read on the generic thread). Non-private for unit testing. 
+ */ + private final Supplier atLeastOneGeoipProcessorSupplier; GeoIpDownloader( Client client, @@ -106,7 +105,10 @@ public class GeoIpDownloader extends AllocatedPersistentTask { String action, String description, TaskId parentTask, - Map headers + Map headers, + Supplier pollIntervalSupplier, + Supplier eagerDownloadSupplier, + Supplier atLeastOneGeoipProcessorSupplier ) { super(id, type, action, description, parentTask, headers); this.httpClient = httpClient; @@ -114,15 +116,9 @@ public class GeoIpDownloader extends AllocatedPersistentTask { this.clusterService = clusterService; this.threadPool = threadPool; endpoint = ENDPOINT_SETTING.get(settings); - pollInterval = POLL_INTERVAL_SETTING.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setPollInterval); - } - - public void setPollInterval(TimeValue pollInterval) { - this.pollInterval = pollInterval; - if (scheduled != null && scheduled.cancel()) { - scheduleNextRun(TimeValue.ZERO); - } + this.pollIntervalSupplier = pollIntervalSupplier; + this.eagerDownloadSupplier = eagerDownloadSupplier; + this.atLeastOneGeoipProcessorSupplier = atLeastOneGeoipProcessorSupplier; } // visible for testing @@ -130,6 +126,7 @@ void updateDatabases() throws IOException { var clusterState = clusterService.state(); var geoipIndex = clusterState.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX); if (geoipIndex != null) { + logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX); if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) { throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active"); } @@ -138,13 +135,18 @@ void updateDatabases() throws IOException { throw blockException; } } - - logger.debug("updating geoip databases"); - List> response = fetchDatabasesOverview(); - for (Map res : response) { - if 
(res.get("name").toString().endsWith(".tgz")) { - processDatabase(res); + if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) { + logger.trace("Updating geoip databases"); + List> response = fetchDatabasesOverview(); + for (Map res : response) { + if (res.get("name").toString().endsWith(".tgz")) { + processDatabase(res); + } } + } else { + logger.trace( + "Not updating geoip databases because no geoip processors exist in the cluster and eager downloading is not configured" + ); } } @@ -186,7 +188,7 @@ void processDatabase(Map databaseInfo) { } } catch (Exception e) { stats = stats.failedDownload(); - logger.error((Supplier) () -> "error downloading geoip database [" + name + "]", e); + logger.error((org.apache.logging.log4j.util.Supplier) () -> "error downloading geoip database [" + name + "]", e); } } @@ -266,6 +268,9 @@ void setState(GeoIpTaskState state) { this.state = state; } + /** + * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval. + */ void runDownloader() { if (isCancelled() || isCompleted()) { return; @@ -281,7 +286,22 @@ void runDownloader() { } catch (Exception e) { logger.error("exception during geoip databases cleanup", e); } - scheduleNextRun(pollInterval); + scheduleNextRun(pollIntervalSupplier.get()); + } + + /** + * This method requests that the downloader be rescheduled to run immediately (presumably because a dynamic property supplied by + * pollIntervalSupplier or eagerDownloadSupplier has changed, or a pipeline with a geoip processor has been added). This method does + * nothing if this task is cancelled, completed, or has not yet been scheduled to run for the first time. It cancels any existing + * scheduled run. 
+ */ + public void requestReschedule() { + if (isCancelled() || isCompleted()) { + return; + } + if (scheduled != null && scheduled.cancel()) { + scheduleNextRun(TimeValue.ZERO); + } } private void cleanDatabases() { @@ -321,4 +341,5 @@ private void scheduleNextRun(TimeValue time) { scheduled = threadPool.schedule(this::runDownloader, time, ThreadPool.Names.GENERIC); } } + } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index c56fd9c2d0c53..7457738b75301 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -17,15 +17,20 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -35,7 +40,10 @@ import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.RemoteTransportException; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; @@ -56,6 +64,20 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor POLL_INTERVAL_SETTING = Setting.timeSetting( + "ingest.geoip.downloader.poll.interval", + TimeValue.timeValueDays(3), + TimeValue.timeValueDays(1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting EAGER_DOWNLOAD_SETTING = Setting.boolSetting( + "ingest.geoip.downloader.eager.download", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); @@ -66,6 +88,10 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor currentTask = new AtomicReference<>(); + private volatile TimeValue pollInterval; + private volatile boolean eagerDownload; + private volatile boolean atLeastOneGeoipProcessor; + private final AtomicBoolean taskIsBootstrapped = new AtomicBoolean(false); GeoIpDownloaderTaskExecutor(Client client, HttpClient httpClient, ClusterService clusterService, ThreadPool threadPool) { super(GEOIP_DOWNLOADER, ThreadPool.Names.GENERIC); @@ -75,9 +101,18 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor pollInterval, + () -> eagerDownload, + () -> atLeastOneGeoipProcessor ); } @@ -140,12 +198,65 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - clusterService.removeListener(this); - if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { - startTask(() -> clusterService.addListener(this)); - } else { - stopTask(() -> clusterService.addListener(this)); + if (taskIsBootstrapped.getAndSet(true) == false) { + this.atLeastOneGeoipProcessor = 
hasAtLeastOneGeoipProcessor(event.state()); + if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { + startTask(() -> taskIsBootstrapped.set(false)); + } else { + stopTask(() -> taskIsBootstrapped.set(false)); + } } + + if (event.metadataChanged() && event.changedCustomMetadataSet().contains(IngestMetadata.TYPE)) { + boolean newAtLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state()); + if (newAtLeastOneGeoipProcessor && atLeastOneGeoipProcessor == false) { + atLeastOneGeoipProcessor = true; + logger.trace("Scheduling runDownloader because a geoip processor has been added"); + GeoIpDownloader currentDownloader = getCurrentTask(); + if (currentDownloader != null) { + currentDownloader.requestReschedule(); + } + } else { + atLeastOneGeoipProcessor = newAtLeastOneGeoipProcessor; + } + } + } + + @SuppressWarnings("unchecked") + static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { + List pipelineDefinitions = IngestService.getPipelines(clusterState); + return pipelineDefinitions.stream().anyMatch(pipelineDefinition -> { + Map pipelineMap = pipelineDefinition.getConfigAsMap(); + return hasAtLeastOneGeoipProcessor((List>) pipelineMap.get(Pipeline.PROCESSORS_KEY)); + }); + } + + private static boolean hasAtLeastOneGeoipProcessor(List> processors) { + return processors != null && processors.stream().anyMatch(GeoIpDownloaderTaskExecutor::hasAtLeastOneGeoipProcessor); + } + + private static boolean hasAtLeastOneGeoipProcessor(Map processor) { + return processor != null + && (processor.containsKey(GeoIpProcessor.TYPE) + || isProcessorWithOnFailureGeoIpProcessor(processor) + || isForeachProcessorWithGeoipProcessor(processor)); + } + + @SuppressWarnings("unchecked") + private static boolean isProcessorWithOnFailureGeoIpProcessor(Map processor) { + return processor != null + && processor.values() + .stream() + .anyMatch( + value -> value instanceof Map + && hasAtLeastOneGeoipProcessor(((Map>>) value).get("on_failure")) + ); + 
} + + @SuppressWarnings("unchecked") + private static boolean isForeachProcessorWithGeoipProcessor(Map processor) { + return processor.containsKey("foreach") + && hasAtLeastOneGeoipProcessor(((Map>) processor.get("foreach")).get("processor")); } private void startTask(Runnable onFailure) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index f69558171fd44..8aaf476b353ea 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -85,9 +85,10 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd public List> getSettings() { return Arrays.asList( CACHE_SIZE, + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING, GeoIpDownloader.ENDPOINT_SETTING, - GeoIpDownloader.POLL_INTERVAL_SETTING, - GeoIpDownloaderTaskExecutor.ENABLED_SETTING + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING ); } @@ -126,6 +127,7 @@ public Collection createComponents( } geoIpDownloaderTaskExecutor = new GeoIpDownloaderTaskExecutor(client, new HttpClient(), clusterService, threadPool); + geoIpDownloaderTaskExecutor.init(); return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java new file mode 100644 index 0000000000000..5cbe205f5c9c7 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.ingest.IngestMetadata; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class GeoIpDownloaderTaskExecutorTests extends ESTestCase { + public void testHasAtLeastOneGeoipProcessor() { + Map configs = new HashMap<>(); + IngestMetadata ingestMetadata = new IngestMetadata(configs); + ClusterState clusterState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + when(metadata.custom(IngestMetadata.TYPE)).thenReturn(ingestMetadata); + when(clusterState.getMetadata()).thenReturn(metadata); + List expectHitsInputs = getPipelinesWithGeoIpProcessors(); + List expectMissesInputs = getPipelinesWithoutGeoIpProcessors(); + { + // Test that hasAtLeastOneGeoipProcessor returns true for any pipeline with a geoip processor: + for (String pipeline : expectHitsInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + // Test that hasAtLeastOneGeoipProcessor returns false for any pipeline without a geoip processor: + for (String pipeline : expectMissesInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + 
assertFalse(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + /* + * Now test that hasAtLeastOneGeoipProcessor returns true for a mix of pipelines, some which have geoip processors and some + * which do not: + */ + configs.clear(); + for (String pipeline : expectHitsInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + for (String pipeline : expectMissesInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + + /* + * This method returns an assorted list of pipelines that have geoip processors -- ones that ought to cause hasAtLeastOneGeoipProcessor + * to return true. + */ + private List getPipelinesWithGeoIpProcessors() { + String simpleGeoIpProcessor = """ + { + "processors":[ + { + "geoip":{ + "field":"provider" + } + } + ] + } + """; + String onFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + ] + } + """; + String foreachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + """; + String nestedForeachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + } + } + ] + } + """; + String nestedForeachWithOnFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", 
+ "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + } + } + } + } + ] + } + """; + String onFailureWithForeachWithGeoIp = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of( + simpleGeoIpProcessor, + onFailureWithGeoIpProcessor, + foreachWithGeoIpProcessor, + nestedForeachWithGeoIpProcessor, + nestedForeachWithOnFailureWithGeoIpProcessor, + onFailureWithForeachWithGeoIp + ); + } + + /* + * This method returns an assorted list of pipelines that _do not_ have geoip processors -- ones that ought to cause + * hasAtLeastOneGeoipProcessor to return false. + */ + private List getPipelinesWithoutGeoIpProcessors() { + String empty = """ + { + } + """; + String noProcessors = """ + { + "processors":[ + ] + } + """; + String onFailureWithForeachWithSet = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "set":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of(empty, noProcessors, onFailureWithForeachWithSet); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 16088deb86b3d..9f3334a07d8f3 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -53,6 +53,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -81,7 +82,12 
@@ public void setup() { when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, - Set.of(GeoIpDownloader.ENDPOINT_SETTING, GeoIpDownloader.POLL_INTERVAL_SETTING, GeoIpDownloaderTaskExecutor.ENABLED_SETTING) + Set.of( + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloader.ENDPOINT_SETTING, + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING + ) ) ); ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); @@ -98,7 +104,10 @@ public void setup() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ); } @@ -252,7 +261,10 @@ public void testProcessDatabaseNew() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -298,7 +310,10 @@ public void testProcessDatabaseUpdate() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -346,7 +361,10 @@ public void testProcessDatabaseSame() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -387,7 
+405,10 @@ public void testUpdateTaskState() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -414,7 +435,10 @@ public void testUpdateTaskStateError() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -440,6 +464,7 @@ public void testUpdateDatabases() throws IOException { builder.close(); when(httpClient.getBytes("a.b?elastic_geoip_service_tos=agree")).thenReturn(baos.toByteArray()); Iterator> it = maps.iterator(); + final AtomicBoolean atLeastOneGeoipProcessor = new AtomicBoolean(false); geoIpDownloader = new GeoIpDownloader( client, httpClient, @@ -451,7 +476,10 @@ public void testUpdateDatabases() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + atLeastOneGeoipProcessor::get ) { @Override void processDatabase(Map databaseInfo) { @@ -459,6 +487,9 @@ void processDatabase(Map databaseInfo) { } }; geoIpDownloader.updateDatabases(); + assertTrue(it.hasNext()); + atLeastOneGeoipProcessor.set(true); + geoIpDownloader.updateDatabases(); assertFalse(it.hasNext()); } diff --git a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java 
b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java index 5b40f4a6ada43..8584229ec171e 100644 --- a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java +++ b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java @@ -11,11 +11,17 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; import org.elasticsearch.client.Request; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -36,6 +42,7 @@ public static Iterable parameters() throws Exception { @Before public void waitForDatabases() throws Exception { + putGeoipPipeline(); assertBusy(() -> { Request request = new Request("GET", "/_ingest/geoip/stats"); Map response = entityAsMap(client().performRequest(request)); @@ -53,4 +60,37 @@ public void waitForDatabases() throws Exception { }); } + /** + * This creates a pipeline with a geoip processor so that the GeoipDownloader will download its databases. 
+ * @throws IOException + */ + private void putGeoipPipeline() throws IOException { + final BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + Request putPipelineRequest = new Request("PUT", "/_ingest/pipeline/pipeline-with-geoip"); + putPipelineRequest.setEntity(new ByteArrayEntity(bytes.array(), ContentType.APPLICATION_JSON)); + client().performRequest(putPipelineRequest); + } + } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java index 3dedd041d6465..eb0e97e1ecce1 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/GeoIpUpgradeIT.java @@ -26,7 +26,9 @@ public void testGeoIpDownloader() throws Exception { assertBusy(() -> { Response response = client().performRequest(new Request("GET", "_ingest/geoip/stats")); String tasks = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(tasks, Matchers.containsString("failed_downloads\":1")); + // The geoip downloader doesn't actually do anything since there are no geoip processors: + assertThat(tasks, Matchers.containsString("failed_downloads\":0")); + assertThat(tasks, Matchers.containsString("successful_downloads\":0")); }); } } From 9d03b143e0ac33158ccbd0c1cf2a7e749174c458 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 13:50:04 -0800 Subject: [PATCH 20/63] Add JDK 20 to 
Java support compatibility testing matrix --- .ci/matrix-runtime-javas.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index a6b2d4a15d848..07582c4892d52 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -10,3 +10,4 @@ ES_RUNTIME_JAVA: - openjdk17 - openjdk18 - openjdk19 + - openjdk20 From 180caf0dc8cb4d0af4a7df545f2c24de7505ca46 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 14:49:58 -0800 Subject: [PATCH 21/63] Bump bundled JDK to Java 19.0.2 (#93354) Closes https://github.com/elastic/elasticsearch/issues/93025 --- build-tools-internal/version.properties | 2 +- docs/changelog/93354.yaml | 6 ++++++ gradle/verification-metadata.xml | 26 ++++++++++++------------- 3 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/93354.yaml diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 8071a0dba037c..6130b599610a9 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 8.7.0 lucene = 9.5.0-snapshot-d19c3e2e0ed bundled_jdk_vendor = openjdk -bundled_jdk = 19.0.1+10@afdd2e245b014143b62ccb916125e3ce +bundled_jdk = 19.0.2+7@fdb695a9d9064ad6b064dc6df578380c # optional dependencies spatial4j = 0.7 diff --git a/docs/changelog/93354.yaml b/docs/changelog/93354.yaml new file mode 100644 index 0000000000000..2ad4d27a069cc --- /dev/null +++ b/docs/changelog/93354.yaml @@ -0,0 +1,6 @@ +pr: 93354 +summary: Bump bundled JDK to Java 19.0.2 +area: Packaging +type: upgrade +issues: + - 93025 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index b049f21a3bcfc..84dd73a6b0f52 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1662,25 +1662,25 @@ - - - + + + - - + + - - - + + + - - + + - - - + + + From de8eda45ebaabb3a8fb68c1242a3213ea13dcf86 Mon Sep 17 
00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 15:15:05 -0800 Subject: [PATCH 22/63] Make `--debug-server-jvm` work with new test framework (#93355) --- .../testclusters/StandaloneRestIntegTestTask.java | 1 + .../test/cluster/local/LocalClusterFactory.java | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index ab1436bb9a317..3754f57dc3788 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -71,6 +71,7 @@ public StandaloneRestIntegTestTask() { @Option(option = "debug-server-jvm", description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch.") public void setDebugServer(boolean enabled) { this.debugServer = enabled; + systemProperty("tests.cluster.debug.enabled", Boolean.toString(enabled)); } @Nested diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 24cebbeb7168f..e63f8236d58d4 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -59,6 +59,9 @@ public class LocalClusterFactory implements ClusterFactory getEnvironmentVariables() { .collect(Collectors.joining(" ")); } + String debugArgs = ""; + if (Boolean.getBoolean(TESTS_CLUSTER_DEBUG_ENABLED_SYSPROP)) { + int port = DEFAULT_DEBUG_PORT + spec.getCluster().getNodes().indexOf(spec); + debugArgs = ENABLE_DEBUG_JVM_ARGS + port; + } + String heapSize = 
System.getProperty("tests.heap.size", "512m"); environment.put("ES_JAVA_OPTS", "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " // Support passing in additional JVM arguments + System.getProperty("tests.jvm.argline", "") + " " + featureFlagProperties - + systemProperties); + + systemProperties + + debugArgs); return environment; } From c97e56e6f220c230f363b55a5673276abdf036a2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 30 Jan 2023 15:52:44 -0800 Subject: [PATCH 23/63] Fix MapperSizeClientYamlTestSuiteIT when FIPS is enabled (#93357) The issue with this test failure is actually that we were silently failing to install the plugin under test into the cluster. The root cause here was the FIPS security policy file was not copied into cluster config directory before we attempting to run the plugin installer. Since we pass the FIPS JVM arguments to all CLI tools as well this caused plugin installation to fail. We now ensure that these files are copied before we attempt to run _any_ ES tools. 
Closes https://github.com/elastic/elasticsearch/issues/93303 --- .../test/mapper_size/10_basic.yml | 4 ---- .../cluster/local/LocalClusterFactory.java | 21 ++++++++++++------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml b/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml index d9b8dc0b01647..434368ed2f5b2 100644 --- a/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml +++ b/plugins/mapper-size/src/yamlRestTest/resources/rest-api-spec/test/mapper_size/10_basic.yml @@ -4,10 +4,6 @@ --- "Mapper Size": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/93303" - - do: indices.create: index: test diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index e63f8236d58d4..963566f52e8a9 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -112,22 +112,20 @@ public synchronized void start(Version version) { distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); initializeWorkingDirectory(currentVersion != null); + createConfigDirectory(); + copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install copyExtraJarFiles(); installPlugins(); if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { installModules(); } currentVersion = spec.getVersion(); + } else { + createConfigDirectory(); + copyExtraConfigFiles(); } - try { - IOUtils.deleteWithRetry(configDir); - Files.createDirectories(configDir); - } 
catch (IOException e) { - throw new UncheckedIOException("An error occurred creating config directory", e); - } writeConfiguration(); - copyExtraConfigFiles(); createKeystore(); addKeystoreSettings(); addKeystoreFiles(); @@ -209,6 +207,15 @@ public void waitUntilReady() { } } + private void createConfigDirectory() { + try { + IOUtils.deleteWithRetry(configDir); + Files.createDirectories(configDir); + } catch (IOException e) { + throw new UncheckedIOException("An error occurred creating config directory", e); + } + } + private List readPortsFile(Path file) { try (Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { return lines.map(String::trim).collect(Collectors.toList()); From c839c40de216bc1981d7aa6a09b412e53c671155 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 31 Jan 2023 07:02:44 +0000 Subject: [PATCH 24/63] Add ActionListener#run (#93338) It's pretty common to run a block of code in a `try ... catch` block that just passes exceptions off to a listener's `onFailure` method. This commit adds a small utility to encapsulate this, enabling some one-liners. 
--- .../test/rest/WaitForRefreshAndCloseIT.java | 7 +--- .../ingest/common/GrokProcessorGetAction.java | 21 +++++------ .../elasticsearch/action/ActionListener.java | 14 ++++--- .../action/SingleResultDeduplicator.java | 6 +-- .../TransportUpdateDesiredNodesAction.java | 8 ++-- ...TransportPrevalidateNodeRemovalAction.java | 8 ++-- .../state/TransportClusterStateAction.java | 10 ++--- .../action/bulk/TransportBulkAction.java | 10 ++--- .../action/support/ChannelActionListener.java | 6 +-- .../support/ListenableActionFuture.java | 10 ++--- .../action/support/TransportAction.java | 6 +-- .../broadcast/TransportBroadcastAction.java | 12 +----- .../node/TransportBroadcastByNodeAction.java | 6 +-- .../util/concurrent/ListenableFuture.java | 37 +++++++------------ .../snapshots/SnapshotShardsService.java | 8 ++-- .../xpack/ccr/action/ShardChangesAction.java | 6 +-- 16 files changed, 61 insertions(+), 114 deletions(-) diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index bdb0a76cf9709..19afb4932ff2c 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -10,6 +10,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -117,11 +118,7 @@ private ActionFuture start(Request request) { client().performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { - try { - 
future.onResponse(EntityUtils.toString(response.getEntity())); - } catch (IOException e) { - future.onFailure(e); - } + ActionListener.completeWith(future, () -> EntityUtils.toString(response.getEntity())); } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 5dcf944eaa2ad..ad00956d2dde7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -144,18 +144,15 @@ public TransportAction(TransportService transportService, ActionFilters actionFi @Override protected void doExecute(Task task, Request request, ActionListener listener) { - try { - listener.onResponse( - new Response( - request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) - ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns - : request.sorted() ? sortedEcsV1GrokPatterns - : ecsV1GrokPatterns - ) - ); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith( + listener, + () -> new Response( + request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) + ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns + : request.sorted() ? 
sortedEcsV1GrokPatterns + : ecsV1GrokPatterns + ) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 6974ddb127603..dd8b629f208c3 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -309,11 +309,7 @@ static ActionListener() { @Override public void onResponse(Response response) { - try { - delegate.onResponse(response); - } catch (Exception e) { - onFailure(e); - } + ActionListener.run(delegate, l -> l.onResponse(response)); } @Override @@ -566,4 +562,12 @@ public String toString() { } } + static > void run(L listener, CheckedConsumer action) { + try { + action.accept(listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java index 20db5fb6efca2..273c542bc825c 100644 --- a/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java +++ b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java @@ -89,10 +89,6 @@ public void onFailure(Exception e) { } }); }); - try { - executeAction.accept(wrappedListener); - } catch (Exception e) { - wrappedListener.onFailure(e); - } + ActionListener.run(wrappedListener, executeAction::accept); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index b0d7000afa8ac..19e5762b0a72f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ 
-79,9 +79,9 @@ protected void masterOperation( Task task, UpdateDesiredNodesRequest request, ClusterState state, - ActionListener listener + ActionListener responseListener ) throws Exception { - try { + ActionListener.run(responseListener, listener -> { settingsValidator.validate(request.getNodes()); clusterService.submitStateUpdateTask( "update-desired-nodes", @@ -89,9 +89,7 @@ protected void masterOperation( ClusterStateTaskConfig.build(Priority.URGENT, request.masterNodeTimeout()), taskExecutor ); - } catch (Exception e) { - listener.onFailure(e); - } + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java index 376abf0863410..f3ce39a0cdbf1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java @@ -83,14 +83,12 @@ protected void masterOperation( Task task, PrevalidateNodeRemovalRequest request, ClusterState state, - ActionListener listener + ActionListener responseListener ) { - try { + ActionListener.run(responseListener, listener -> { Set requestNodes = resolveNodes(request, state.nodes()); doPrevalidation(request, requestNodes, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } + }); } public static Set resolveNodes(PrevalidateNodeRemovalRequest request, DiscoveryNodes discoveryNodes) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 48370ca2199ce..c6af59caa7f17 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -123,13 +123,11 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - try { - if (cancellableTask.notifyIfCancelled(listener) == false) { - listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); + ActionListener.run(listener, l -> { + if (cancellableTask.notifyIfCancelled(l) == false) { + l.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); } - } catch (Exception e) { - listener.onFailure(e); - } + }); } }, clusterState -> cancellableTask.isCancelled() || acceptableClusterStateOrFailedPredicate.test(clusterState)); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f2ee085e9ac19..66b365f6a092e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -255,7 +255,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing but // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that this on the second time through this method, // this path is never taken. 
- try { + ActionListener.run(listener, l -> { if (Assertions.ENABLED) { final boolean arePipelinesResolved = bulkRequest.requests() .stream() @@ -265,13 +265,11 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec assert arePipelinesResolved : bulkRequest; } if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, executorName, listener); + processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); + ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); } - } catch (Exception e) { - listener.onFailure(e); - } + }); return; } diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index 53674d8ac4ba3..7e778f8a5fd8d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -35,11 +35,7 @@ public ChannelActionListener(TransportChannel channel, String actionName, Reques @Override public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } + ActionListener.run(this, l -> l.channel.sendResponse(response)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java index d506e9cefa840..fcb079ef1fd83 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java @@ -78,13 +78,9 @@ protected void done(boolean success) { } private void executeListener(final ActionListener listener) { - try { - // we use a timeout of 0 to 
by pass assertion forbidding to call actionGet() (blocking) on a network thread. - // here we know we will never block - listener.onResponse(actionGet(0)); - } catch (Exception e) { - listener.onFailure(e); - } + // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread. + // here we know we will never block + ActionListener.completeWith(listener, () -> actionGet(0)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index de39117529647..4d3b9b0c15ff0 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -111,11 +111,7 @@ private TaskResultStoringActionListener(TaskManager taskManager, Task task, Acti @Override public void onResponse(Response response) { - try { - taskManager.storeResult(task, response, delegate); - } catch (Exception e) { - delegate.onFailure(e); - } + ActionListener.run(delegate, l -> taskManager.storeResult(task, response, l)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 7e0b636d0056b..6132b61a304cb 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -137,11 +137,7 @@ protected AsyncBroadcastAction(Task task, Request request, ActionListener(0), clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> newResponse(request, new AtomicReferenceArray(0), clusterState)); return; } // count the local operations, and perform the non local ones @@ -247,11 +243,7 @@ protected 
AtomicReferenceArray shardsResponses() { } protected void finishHim() { - try { - listener.onResponse(newResponse(request, shardsResponses, clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> newResponse(request, shardsResponses, clusterState)); } void setFailure(ShardIterator shardIt, int shardIndex, Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 4d0c914fa544d..82cc91e620d7e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -336,11 +336,7 @@ public void start() { cancellableTask.addListener(this); } if (nodeIds.size() == 0) { - try { - onCompletion(); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.run(listener, ignored -> onCompletion()); } else { int nodeIndex = -1; for (Map.Entry> entry : nodeIds.entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java index a02c5027d6df9..f2788d278c814 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java @@ -105,33 +105,24 @@ protected void done(boolean ignored) { } private void notifyListenerDirectly(ActionListener listener) { - try { - // call get in a non-blocking fashion as we could be on a network thread - // or another thread like the scheduler, which we should never block! 
- assert done; - V value = FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS); - listener.onResponse(value); - } catch (Exception e) { - listener.onFailure(e); - } + // call get in a non-blocking fashion as we could be on a network thread + // or another thread like the scheduler, which we should never block! + assert done; + ActionListener.completeWith(listener, () -> FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS)); } private void notifyListener(ActionListener listener, ExecutorService executorService) { - try { - executorService.execute(new Runnable() { - @Override - public void run() { - notifyListenerDirectly(listener); - } + ActionListener.run(listener, l -> executorService.execute(new Runnable() { + @Override + public void run() { + notifyListenerDirectly(l); + } - @Override - public String toString() { - return "ListenableFuture notification"; - } - }); - } catch (Exception e) { - listener.onFailure(e); - } + @Override + public String toString() { + return "ListenableFuture notification"; + } + })); } @Override diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 16b5a78bac4dd..d6b5ffd5f42d4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -344,9 +344,9 @@ private void snapshot( final IndexShardSnapshotStatus snapshotStatus, Version version, final long entryStartTime, - ActionListener listener + ActionListener resultListener ) { - try { + ActionListener.run(resultListener, listener -> { if (snapshotStatus.isAborted()) { throw new AbortedSnapshotException(); } @@ -387,9 +387,7 @@ private void snapshot( IOUtils.close(snapshotRef); throw e; } - } catch (Exception e) { - listener.onFailure(e); - } + }); } /** diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 6e24d97cef49c..179e4a7e21388 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -447,11 +447,7 @@ private void globalCheckpointAdvanced( final ActionListener listener ) { logger.trace("{} global checkpoint advanced to [{}] after waiting for [{}]", shardId, globalCheckpoint, request.getFromSeqNo()); - try { - super.asyncShardOperation(request, shardId, listener); - } catch (final IOException caught) { - listener.onFailure(caught); - } + ActionListener.run(listener, l -> super.asyncShardOperation(request, shardId, l)); } private void globalCheckpointAdvancementFailure( From 060382738b570c91c1d09dd82abf7b1d7d52d558 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 31 Jan 2023 09:19:23 +0000 Subject: [PATCH 25/63] [ML] Wait for _infer to work after restart in full cluster restart tests (#93327) --- .../restart/MLModelDeploymentFullClusterRestartIT.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 5a9e28274b84e..25a14c47e52c7 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -105,8 +105,10 @@ public void testDeploymentSurvivesRestart() throws Exception { request.addParameter("timeout", "70s"); })); 
waitForDeploymentStarted(modelId); - assertInfer(modelId); - assertNewInfer(modelId); + assertBusy(() -> { + assertInfer(modelId); + assertNewInfer(modelId); + }, 90, TimeUnit.SECONDS); stopDeployment(modelId); } } From 39ba013d944e07031b17e60e864054f25803090d Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Tue, 31 Jan 2023 11:31:36 +0200 Subject: [PATCH 26/63] Unpromotables skip replication and peer recovery (#93210) For skipping replication: * ReplicationTracker and Group filter shards that are promotable to primary * Remove unpromotable shards from in sync allocations in metadata * There is a new Refresh action for unpromotable replica shards Fixes ES-4861 For skipping peer recovery: * Unpromotable shards pass directly to STARTED skipping some intermediate peer recovery stages and messages Fixes ES-5257 --- docs/changelog/93210.yaml | 5 + .../cluster/routing/ShardRoutingRoleIT.java | 173 +++++++++++++++++- .../refresh/ReplicaShardRefreshRequest.java | 58 ------ .../refresh/TransportShardRefreshAction.java | 80 +++++--- ...ansportUnpromotableShardRefreshAction.java | 47 +++++ .../UnpromotableShardRefreshRequest.java | 59 ++++++ .../replication/ReplicationOperation.java | 1 + .../cluster/routing/IndexRoutingTable.java | 4 +- .../routing/IndexShardRoutingTable.java | 14 +- .../allocation/IndexMetadataUpdater.java | 14 +- .../index/seqno/ReplicationTracker.java | 23 ++- .../elasticsearch/index/shard/IndexShard.java | 28 ++- .../index/shard/ReplicationGroup.java | 5 +- .../recovery/PeerRecoveryTargetService.java | 83 +++++---- .../recovery/RecoveriesCollection.java | 1 + ...portVerifyShardBeforeCloseActionTests.java | 2 +- ...TransportResyncReplicationActionTests.java | 2 +- .../ReplicationOperationTests.java | 2 +- .../TransportReplicationActionTests.java | 7 +- .../cluster/ClusterStateTests.java | 6 +- .../metadata/AutoExpandReplicasTests.java | 8 +- .../index/engine/EngineTestCase.java | 2 +- 22 files changed, 464 insertions(+), 160 deletions(-) create 
mode 100644 docs/changelog/93210.yaml delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java diff --git a/docs/changelog/93210.yaml b/docs/changelog/93210.yaml new file mode 100644 index 0000000000000..179f4ab9dec8d --- /dev/null +++ b/docs/changelog/93210.yaml @@ -0,0 +1,5 @@ +pr: 93210 +summary: Unpromotables skip replication and peer recovery +area: Allocation +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 5b47e6d08acc4..2f186a41139b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -10,10 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; @@ -28,6 +31,7 @@ import org.elasticsearch.index.IndexService; import 
org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.NoOpEngine; import org.elasticsearch.index.shard.IndexShard; @@ -39,6 +43,8 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Arrays; @@ -46,16 +52,22 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @SuppressWarnings("resource") public class ShardRoutingRoleIT extends ESIntegTestCase { @@ -65,6 +77,7 @@ public class ShardRoutingRoleIT extends ESIntegTestCase { public static class TestPlugin extends Plugin implements ClusterPlugin, EnginePlugin { volatile int numIndexingCopies = 1; + static final String NODE_ATTR_UNPROMOTABLE_ONLY = "unpromotableonly"; @Override public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() { @@ -93,12 +106,55 @@ public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode n } return super.canForceAllocatePrimary(shardRouting, node, 
allocation); } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + var nodesWithUnpromotableOnly = allocation.getClusterState() + .nodes() + .stream() + .filter(n -> Objects.equals("true", n.getAttributes().get(NODE_ATTR_UNPROMOTABLE_ONLY))) + .map(DiscoveryNode::getName) + .collect(Collectors.toUnmodifiableSet()); + if (nodesWithUnpromotableOnly.isEmpty() == false) { + if (nodesWithUnpromotableOnly.contains(node.node().getName())) { + if (shardRouting.isPromotableToPrimary()) { + return allocation.decision( + Decision.NO, + "test", + "shard is promotable to primary so may not be assigned to [" + node.node().getName() + "]" + ); + } + } else { + if (shardRouting.isPromotableToPrimary() == false) { + return allocation.decision( + Decision.NO, + "test", + "shard is not promotable to primary so may not be assigned to [" + node.node().getName() + "]" + ); + } + } + } + return Decision.YES; + } }); } @Override public Optional getEngineFactory(IndexSettings indexSettings) { - return Optional.of(config -> config.isPromotableToPrimary() ? 
new InternalEngine(config) : new NoOpEngine(config)); + return Optional.of(config -> { + if (config.isPromotableToPrimary()) { + return new InternalEngine(config); + } else { + try { + config.getStore().createEmpty(); + } catch (IOException e) { + logger.error("Error creating empty store", e); + throw new RuntimeException(e); + } + + return new NoOpEngine(EngineTestCase.copy(config, () -> -1L)); + } + }); } } @@ -109,7 +165,7 @@ protected boolean addMockInternalEngine() { @Override protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), TestPlugin.class); + return CollectionUtils.concatLists(List.of(MockTransportService.TestPlugin.class, TestPlugin.class), super.nodePlugins()); } @Override @@ -193,11 +249,32 @@ private static void assertRolesInRoutingTableXContent(ClusterState state) { } } - public void testShardCreation() { + private static void installMockTransportVerifications(RoutingTableWatcher routingTableWatcher) { + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (routingTableWatcher.numIndexingCopies == 1) { + assertThat("no recovery action should be exchanged", action, not(startsWith("internal:index/shard/recovery/"))); + assertThat("no replicated action should be exchanged", action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + mockTransportService.addRequestHandlingBehavior( + TransportUnpromotableShardRefreshAction.NAME, + (handler, request, channel, task) -> { + // Skip handling the request and send an immediate empty response + channel.sendResponse(ActionResponse.Empty.INSTANCE); + } + ); + } + } + + public void testShardCreation() throws Exception { var routingTableWatcher = new RoutingTableWatcher(); var numDataNodes = 
routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -234,6 +311,7 @@ public void testShardCreation() { ensureGreen(INDEX_NAME); assertEngineTypes(); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); // removing replicas drops SEARCH_ONLY copies first while (routingTableWatcher.numReplicas > 0) { @@ -341,6 +419,7 @@ public void testPromotion() { var numDataNodes = routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -399,7 +478,7 @@ public AllocationCommand getCancelPrimaryCommand() { return null; } - public void testSearchRouting() { + public void testSearchRouting() throws Exception { var routingTableWatcher = new RoutingTableWatcher(); routingTableWatcher.numReplicas = Math.max(1, routingTableWatcher.numReplicas); @@ -407,6 +486,7 @@ public void testSearchRouting() { getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numReplicas + 1); + installMockTransportVerifications(routingTableWatcher); final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); try { @@ -414,7 +494,7 @@ public void testSearchRouting() { masterClusterService.addListener(routingTableWatcher); createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); - // TODO index some documents here once recovery/replication ignore unpromotable shards + 
indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); ensureGreen(INDEX_NAME); assertEngineTypes(); @@ -483,6 +563,7 @@ public void testClosedIndex() { var numDataNodes = routingTableWatcher.numReplicas + 2; internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); @@ -501,4 +582,86 @@ public void testClosedIndex() { masterClusterService.removeListener(routingTableWatcher); } } + + public void testRefreshOfUnpromotableShards() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + final AtomicInteger refreshUnpromotableActions = new AtomicInteger(0); + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.startsWith(TransportUnpromotableShardRefreshAction.NAME)) { + refreshUnpromotableActions.incrementAndGet(); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex( + INDEX_NAME, + Settings.builder() + .put(routingTableWatcher.getIndexSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false) + 
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) + .build() + ); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + indexRandom(true, INDEX_NAME, randomIntBetween(1, 10)); + + // Each primary will send a TransportUnpromotableShardRefreshAction to each of the unpromotable replica shards + assertThat( + refreshUnpromotableActions.get(), + is(equalTo((routingTableWatcher.numReplicas - (routingTableWatcher.numIndexingCopies - 1)) * routingTableWatcher.numShards)) + ); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + public void testNodesWithUnpromotableShardsNeverGetReplicationActions() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + var additionalNumberOfNodesWithUnpromotableShards = randomIntBetween(1, 3); + routingTableWatcher.numReplicas = routingTableWatcher.numIndexingCopies + additionalNumberOfNodesWithUnpromotableShards - 1; + internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numIndexingCopies + 1); + final List nodesWithUnpromotableOnly = internalCluster().startDataOnlyNodes( + additionalNumberOfNodesWithUnpromotableShards, + Settings.builder().put("node.attr." 
+ TestPlugin.NODE_ATTR_UNPROMOTABLE_ONLY, "true").build() + ); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (nodesWithUnpromotableOnly.contains(connection.getNode().getName())) { + assertThat(action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + ensureGreen(INDEX_NAME); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java deleted file mode 100644 index a10d03bf30c10..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/ReplicaShardRefreshRequest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.action.support.replication.ReplicationRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.tasks.TaskId; - -import java.io.IOException; - -public class ReplicaShardRefreshRequest extends ReplicationRequest { - - @Nullable - private final Long segmentGeneration; - - public ReplicaShardRefreshRequest(ShardId shardId, TaskId parentTaskId, @Nullable Long segmentGeneration) { - super(shardId); - setParentTask(parentTaskId); - this.segmentGeneration = segmentGeneration; - } - - public ReplicaShardRefreshRequest(StreamInput in) throws IOException { - super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - this.segmentGeneration = in.readOptionalVLong(); - } else { - this.segmentGeneration = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - out.writeOptionalVLong(segmentGeneration); - } - } - - @Nullable - public Long getSegmentGeneration() { - return segmentGeneration; - } - - @Override - public String toString() { - return "ReplicaShardRefreshRequest{" + shardId + '}'; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 53c83a99183d8..c7e7ab9733827 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -9,30 +9,38 @@ package 
org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.function.Predicate; +import java.util.stream.Collectors; public class TransportShardRefreshAction extends TransportReplicationAction< BasicReplicationRequest, - ReplicaShardRefreshRequest, + BasicReplicationRequest, ReplicationResponse> { private static final Logger logger = LogManager.getLogger(TransportShardRefreshAction.class); @@ -41,8 +49,6 @@ public class TransportShardRefreshAction extends TransportReplicationAction< public static final ActionType TYPE = new 
ActionType<>(NAME, ReplicationResponse::new); public static final String SOURCE_API = "api"; - private final Settings settings; - @Inject public TransportShardRefreshAction( Settings settings, @@ -63,10 +69,10 @@ public TransportShardRefreshAction( shardStateAction, actionFilters, BasicReplicationRequest::new, - ReplicaShardRefreshRequest::new, + BasicReplicationRequest::new, ThreadPool.Names.REFRESH ); - this.settings = settings; + new TransportUnpromotableShardRefreshAction(transportService, actionFilters, indicesService); } @Override @@ -78,31 +84,53 @@ protected ReplicationResponse newResponseInstance(StreamInput in) throws IOExcep protected void shardOperationOnPrimary( BasicReplicationRequest shardRequest, IndexShard primary, - ActionListener> listener + ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { + try (var listeners = new RefCountingListener(listener.map(v -> new PrimaryResult<>(shardRequest, new ReplicationResponse())))) { var refreshResult = primary.refresh(SOURCE_API); logger.trace("{} refresh request executed on primary", primary.shardId()); - var shardRefreshRequest = new ReplicaShardRefreshRequest( - primary.shardId(), - shardRequest.getParentTask(), - refreshResult.generation() - ); - return new PrimaryResult<>(shardRefreshRequest, new ReplicationResponse()); - }); + + // Forward the request to all nodes that hold unpromotable replica shards + final ClusterState clusterState = clusterService.state(); + final Task parentTaskId = taskManager.getTask(shardRequest.getParentTask().getId()); + clusterState.routingTable() + .shardRoutingTable(shardRequest.shardId()) + .assignedShards() + .stream() + .filter(Predicate.not(ShardRouting::isPromotableToPrimary)) + .map(ShardRouting::currentNodeId) + .collect(Collectors.toUnmodifiableSet()) + .forEach(nodeId -> { + final DiscoveryNode node = clusterState.nodes().get(nodeId); + UnpromotableShardRefreshRequest request = new UnpromotableShardRefreshRequest( + primary.shardId(), + 
refreshResult.generation() + ); + logger.trace("forwarding refresh request [{}] to node [{}]", request, node); + transportService.sendChildRequest( + node, + TransportUnpromotableShardRefreshAction.NAME, + request, + parentTaskId, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + listeners.acquire(ignored -> {}), + (in) -> TransportResponse.Empty.INSTANCE, + ThreadPool.Names.REFRESH + ) + ); + }); + } catch (Exception e) { + listener.onFailure(e); + } } @Override - protected void shardOperationOnReplica(ReplicaShardRefreshRequest request, IndexShard replica, ActionListener listener) { - if (DiscoveryNode.isStateless(settings) && replica.routingEntry().isPromotableToPrimary() == false) { - assert request.getSegmentGeneration() != Engine.RefreshResult.UNKNOWN_GENERATION; - replica.waitForSegmentGeneration(request.getSegmentGeneration(), listener.map(l -> new ReplicaResult())); - } else { - ActionListener.completeWith(listener, () -> { - replica.refresh(SOURCE_API); - logger.trace("{} refresh request executed on replica", replica.shardId()); - return new ReplicaResult(); - }); - } + protected void shardOperationOnReplica(BasicReplicationRequest request, IndexShard replica, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + replica.refresh(SOURCE_API); + logger.trace("{} refresh request executed on replica", replica.shardId()); + return new ReplicaResult(); + }); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java new file mode 100644 index 0000000000000..500a53513a60b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportUnpromotableShardRefreshAction extends HandledTransportAction { + public static final String NAME = RefreshAction.NAME + "[u]"; + + private final IndicesService indicesService; + + @Inject + public TransportUnpromotableShardRefreshAction( + TransportService transportService, + ActionFilters actionFilters, + IndicesService indicesService + ) { + super(NAME, transportService, actionFilters, UnpromotableShardRefreshRequest::new, ThreadPool.Names.REFRESH); + this.indicesService = indicesService; + } + + @Override + protected void doExecute(Task task, UnpromotableShardRefreshRequest request, ActionListener responseListener) { + ActionListener.run(responseListener, listener -> { + assert request.getSegmentGeneration() != Engine.RefreshResult.UNKNOWN_GENERATION + : "The request segment is " + request.getSegmentGeneration(); + IndexShard shard = indicesService.indexServiceSafe(request.getShardId().getIndex()).getShard(request.getShardId().id()); + shard.waitForSegmentGeneration(request.getSegmentGeneration(), listener.map(l -> ActionResponse.Empty.INSTANCE)); + 
}); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java new file mode 100644 index 0000000000000..52ef3917ce722 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +public class UnpromotableShardRefreshRequest extends ActionRequest { + + private final ShardId shardId; + private final long segmentGeneration; + + public UnpromotableShardRefreshRequest(final ShardId shardId, long segmentGeneration) { + this.shardId = shardId; + this.segmentGeneration = segmentGeneration; + } + + public UnpromotableShardRefreshRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + segmentGeneration = in.readVLong(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardId.writeTo(out); + out.writeVLong(segmentGeneration); + } + + public ShardId getShardId() { + return shardId; + } + + public long getSegmentGeneration() { + return 
segmentGeneration; + } + + @Override + public String toString() { + return "UnpromotableShardRefreshRequest{" + "shardId=" + shardId + ", segmentGeneration=" + segmentGeneration + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 8ec274bc410f6..6b1916b4ec843 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -213,6 +213,7 @@ private void performOnReplica( final long maxSeqNoOfUpdatesOrDeletes, final PendingReplicationActions pendingReplicationActions ) { + assert shard.isPromotableToPrimary() : "only promotable shards should receive replication requests"; if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index a26e36aa39f9b..0c62dce1b2209 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -136,7 +136,9 @@ boolean validate(Metadata metadata) { ); } final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(shardRouting.id()); - if (shardRouting.active() && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { + if (shardRouting.active() + && shardRouting.isPromotableToPrimary() + && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { throw new IllegalStateException( "active shard routing " + shardRouting diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 0dd85d873463d..3a5a369caa3f2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -499,15 +499,17 @@ public ShardRouting getByAllocationId(String allocationId) { return null; } - public Set getAllAllocationIds() { + public Set getPromotableAllocationIds() { assert MasterService.assertNotMasterUpdateThread("not using this on the master thread so we don't have to pre-compute this"); Set allAllocationIds = new HashSet<>(); for (ShardRouting shard : shards) { - if (shard.relocating()) { - allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); - } - if (shard.assignedToNode()) { - allAllocationIds.add(shard.allocationId().getId()); + if (shard.isPromotableToPrimary()) { + if (shard.relocating()) { + allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); + } + if (shard.assignedToNode()) { + allAllocationIds.add(shard.allocationId().getId()); + } } } return allAllocationIds; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java index 469e7f7efe36c..e0b53e312e400 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -69,12 +69,14 @@ public void shardStarted(ShardRouting initializingShard, ShardRouting startedSha + "] and startedShard.allocationId [" + startedShard.allocationId().getId() + "] have to have the same"; - Updates updates = changes(startedShard.shardId()); - updates.addedAllocationIds.add(startedShard.allocationId().getId()); - if (startedShard.primary() - // started shard has to have null 
recoverySource; have to pick up recoverySource from its initializing state - && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { - updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + if (startedShard.isPromotableToPrimary()) { + Updates updates = changes(startedShard.shardId()); + updates.addedAllocationIds.add(startedShard.allocationId().getId()); + if (startedShard.primary() + // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state + && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { + updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 86290ca79a65a..12ae735d16b55 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -263,6 +263,7 @@ public synchronized RetentionLeases getRetentionLeases(final boolean expireLease final long retentionLeaseMillis = indexSettings.getRetentionLeaseMillis(); final Set leaseIdsForCurrentPeers = routingTable.assignedShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .map(ReplicationTracker::getPeerRecoveryRetentionLeaseId) .collect(Collectors.toSet()); final boolean allShardsStarted = routingTable.allShardsStarted(); @@ -607,7 +608,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { boolean renewalNeeded = false; for (int copy = 0; copy < routingTable.size(); copy++) { final ShardRouting shardRouting = routingTable.shard(copy); - if (shardRouting.assignedToNode() == false) { + if (shardRouting.assignedToNode() == false || 
shardRouting.isPromotableToPrimary() == false) { continue; } final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); @@ -628,7 +629,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { if (renewalNeeded) { for (int copy = 0; copy < routingTable.size(); copy++) { final ShardRouting shardRouting = routingTable.shard(copy); - if (shardRouting.assignedToNode()) { + if (shardRouting.assignedToNode() && shardRouting.isPromotableToPrimary()) { final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); if (retentionLease != null) { final CheckpointState checkpointState = checkpoints.get(shardRouting.allocationId().getId()); @@ -874,8 +875,15 @@ private boolean invariant() { assert replicationGroup == null || replicationGroup.equals(calculateReplicationGroup()) : "cached replication group out of sync: expected: " + calculateReplicationGroup() + " but was: " + replicationGroup; + if (replicationGroup != null) { + assert replicationGroup.getReplicationTargets().stream().allMatch(ShardRouting::isPromotableToPrimary) + : "expected all replication target shards of the replication group to be promotable to primary"; + assert replicationGroup.getSkippedShards().stream().allMatch(ShardRouting::isPromotableToPrimary) + : "expected all skipped shards of the replication group to be promotable to primary"; + } + // all assigned shards from the routing table are tracked - assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getAllAllocationIds()) + assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getPromotableAllocationIds()) : "local checkpoints " + checkpoints + " not in-sync with routing table " + routingTable; for (Map.Entry entry : checkpoints.entrySet()) { @@ -895,7 +903,7 @@ private boolean invariant() { if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { // all tracked shard 
copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { - if (checkpoints.get(shardRouting.allocationId().getId()).tracked) { + if (shardRouting.isPromotableToPrimary() && checkpoints.get(shardRouting.allocationId().getId()).tracked) { assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( @@ -1151,6 +1159,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { } else if (hasAllPeerRecoveryRetentionLeases == false && routingTable.assignedShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .allMatch( shardRouting -> retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) || checkpoints.get(shardRouting.allocationId().getId()).tracked == false @@ -1185,6 +1194,7 @@ public synchronized void updateFromMaster( // remove entries which don't exist on master Set initializingAllocationIds = routingTable.getAllInitializingShards() .stream() + .filter(ShardRouting::isPromotableToPrimary) .map(ShardRouting::allocationId) .map(AllocationId::getId) .collect(Collectors.toSet()); @@ -1495,7 +1505,10 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { */ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { - final List shardRoutings = routingTable.assignedShards(); + final List shardRoutings = routingTable.assignedShards() + .stream() + .filter(ShardRouting::isPromotableToPrimary) + .toList(); final GroupedActionListener groupedActionListener = new GroupedActionListener<>( shardRoutings.size(), ActionListener.wrap(vs -> { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 280934c73364f..871928a96e4b6 
100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedConsumer; @@ -709,17 +708,26 @@ public void onFailure(Exception e) { if (indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery == false) { final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases(); - final Set shardRoutings = Sets.newHashSetWithExpectedSize(routingTable.size()); + boolean allShardsUseRetentionLeases = true; for (int copy = 0; copy < routingTable.size(); copy++) { - shardRoutings.add(routingTable.shard(copy)); - } - shardRoutings.addAll(routingTable.assignedShards()); // include relocation targets - if (shardRoutings.stream() - .allMatch( - shr -> shr.assignedToNode() && retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shr)) - )) { - useRetentionLeasesInPeerRecovery = true; + ShardRouting shardRouting = routingTable.shard(copy); + if (shardRouting.isPromotableToPrimary()) { + if (shardRouting.assignedToNode() == false + || retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting)) == false) { + allShardsUseRetentionLeases = false; + break; + } + if (this.shardRouting.relocating()) { + ShardRouting shardRoutingReloc = this.shardRouting.getTargetRelocatingShard(); + if (shardRoutingReloc.assignedToNode() == false + || retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRoutingReloc)) == false) { + allShardsUseRetentionLeases = false; + break; + } + } + } } + 
useRetentionLeasesInPeerRecovery = allShardsUseRetentionLeases; } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java index cf3b8fc0fbaf3..53f932faf4512 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ReplicationGroup.java @@ -40,11 +40,14 @@ public ReplicationGroup( this.trackedAllocationIds = trackedAllocationIds; this.version = version; - this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getAllAllocationIds()); + this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getPromotableAllocationIds()); this.replicationTargets = new ArrayList<>(); this.skippedShards = new ArrayList<>(); for (int copy = 0; copy < routingTable.size(); copy++) { ShardRouting shard = routingTable.shard(copy); + if (shard.isPromotableToPrimary() == false) { + continue; + } if (shard.unassigned()) { assert shard.primary() == false : "primary shard should not be unassigned in a replication group: " + shard; skippedShards.add(shard); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 1d3120310f0c9..abd1ef4aaf958 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -216,43 +216,62 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi } final RecoveryTarget recoveryTarget = recoveryRef.target(); assert recoveryTarget.sourceNode() != null : "cannot do a recovery without a source node"; - final RecoveryState.Timer timer = recoveryTarget.state().getTimer(); + final RecoveryState recoveryState = recoveryTarget.state(); + final 
RecoveryState.Timer timer = recoveryState.getTimer(); + final IndexShard indexShard = recoveryTarget.indexShard(); + + final var failureHandler = ActionListener.notifyOnce(ActionListener.runBefore(ActionListener.noop().delegateResponse((l, e) -> { + // this will be logged as warning later on... + logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + onGoingRecoveries.failRecovery( + recoveryId, + new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), + true + ); + }), recoveryRef::close)); - record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} - final ActionListener toSendListener = ActionListener.notifyOnce( - ActionListener.runBefore(new ActionListener<>() { - @Override - public void onResponse(StartRecoveryRequestToSend r) { - logger.trace( - "{} [{}]: recovery from {}", - r.startRecoveryRequest().shardId(), - r.actionName(), - r.startRecoveryRequest().sourceNode() - ); - transportService.sendRequest( - r.startRecoveryRequest().sourceNode(), - r.actionName(), - r.requestToSend(), - new RecoveryResponseHandler(r.startRecoveryRequest(), timer) - ); - } + if (indexShard.routingEntry().isPromotableToPrimary() == false) { + assert preExistingRequest == null; + assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; + try { + indexShard.preRecovery(failureHandler.map(v -> { + logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); + indexShard.prepareForIndexRecovery(); + // Skip unnecessary intermediate stages + recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); + recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + indexShard.openEngineAndSkipTranslogRecovery(); + recoveryState.getIndex().setFileDetailsComplete(); + recoveryState.setStage(RecoveryState.Stage.FINALIZE); + onGoingRecoveries.markRecoveryAsDone(recoveryId); + return null; + })); + 
} catch (Exception e) { + failureHandler.onFailure(e); + } - @Override - public void onFailure(Exception e) { - // this will be logged as warning later on... - logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); - onGoingRecoveries.failRecovery( - recoveryId, - new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), - true - ); - } - }, recoveryRef::close) - ); + return; + } + + record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} + final ActionListener toSendListener = failureHandler.map(r -> { + logger.trace( + "{} [{}]: recovery from {}", + r.startRecoveryRequest().shardId(), + r.actionName(), + r.startRecoveryRequest().sourceNode() + ); + transportService.sendRequest( + r.startRecoveryRequest().sourceNode(), + r.actionName(), + r.requestToSend(), + new RecoveryResponseHandler(r.startRecoveryRequest(), timer) + ); + return null; + }); if (preExistingRequest == null) { try { - final IndexShard indexShard = recoveryTarget.indexShard(); indexShard.preRecovery(toSendListener.delegateFailure((l, v) -> ActionListener.completeWith(l, () -> { logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index d896425eef2cf..59ed1ba2b871f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -164,6 +164,7 @@ public RecoveryRef getRecoverySafe(long id, ShardId shardId) { throw new IndexShardClosedException(shardId); } assert recoveryRef.target().shardId().equals(shardId); + assert recoveryRef.target().indexShard().routingEntry().isPromotableToPrimary(); 
return recoveryRef; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 8dd0e89e1cbbe..cf096e35bdbc0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -234,7 +234,7 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { final long primaryTerm = indexMetadata.primaryTerm(0); final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(0); - final Set trackedShards = shardRoutingTable.getAllAllocationIds(); + final Set trackedShards = shardRoutingTable.getPromotableAllocationIds(); List unavailableShards = randomSubsetOf(randomIntBetween(1, nbReplicas), shardRoutingTable.replicaShards()); IndexShardRoutingTable.Builder shardRoutingTableBuilder = new IndexShardRoutingTable.Builder(shardRoutingTable); diff --git a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java index 4a3498ea6baae..919737caf2c7a 100644 --- a/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/resync/TransportResyncReplicationActionTests.java @@ -152,7 +152,7 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { new ReplicationGroup( shardRoutingTable, clusterService.state().metadata().index(index).inSyncAllocationIds(shardId.id()), - shardRoutingTable.getAllAllocationIds(), + shardRoutingTable.getPromotableAllocationIds(), 0 ) ); diff --git 
a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 8dab09fb6015f..543b673635ee0 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -507,7 +507,7 @@ public void testPrimaryFailureHandlingReplicaResponse() throws Exception { final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(0); final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); - final Set trackedShards = shardRoutingTable.getAllAllocationIds(); + final Set trackedShards = shardRoutingTable.getPromotableAllocationIds(); final ReplicationGroup initialReplicationGroup = new ReplicationGroup(shardRoutingTable, inSyncAllocationIds, trackedShards, 0); final Thread testThread = Thread.currentThread(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 97605ec71928f..e64dddff3cdd3 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -928,7 +928,12 @@ public void testSeqNoIsSetOnPrimary() { Set inSyncIds = randomBoolean() ? 
singleton(routingEntry.allocationId().getId()) : clusterService.state().metadata().index(index).inSyncAllocationIds(0); - ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, shardRoutingTable.getAllAllocationIds(), 0); + ReplicationGroup replicationGroup = new ReplicationGroup( + shardRoutingTable, + inSyncIds, + shardRoutingTable.getPromotableAllocationIds(), + 0 + ); when(shard.getReplicationGroup()).thenReturn(replicationGroup); PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); replicationActions.accept(replicationGroup); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index e4c13a13a16ba..f26caf2f98e5e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -134,7 +134,7 @@ public void testToXContent() throws IOException { IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -374,7 +374,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); @@ -606,7 +606,7 
@@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti IndexRoutingTable index = clusterState.getRoutingTable().getIndicesRouting().get("index"); String ephemeralId = clusterState.getNodes().get("nodeId1").getEphemeralId(); - String allocationId = index.shard(0).getAllAllocationIds().iterator().next(); + String allocationId = index.shard(0).getPromotableAllocationIds().iterator().next(); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index fedbc31fcdeb7..c82b13918835e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -162,7 +162,11 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE postTable = state.routingTable().index("index").shard(0); assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted()); - assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(is(in(preTable.getAllAllocationIds())))); + assertThat( + postTable.toString(), + postTable.getPromotableAllocationIds(), + everyItem(is(in(preTable.getPromotableAllocationIds()))) + ); } else { // fake an election where conflicting nodes are removed and readded state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build(); @@ -199,7 +203,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE .map(shr -> shr.allocationId().getId()) .collect(Collectors.toSet()); - assertThat(postTable.toString(), unchangedAllocationIds, everyItem(is(in(postTable.getAllAllocationIds())))); + assertThat(postTable.toString(), unchangedAllocationIds, 
everyItem(is(in(postTable.getPromotableAllocationIds())))); RoutingNodesHelper.asStream(postTable).forEach(shardRouting -> { if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 277dfeb913525..68b4f18fbcfd2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -249,7 +249,7 @@ public static FieldType randomIdFieldType() { return randomBoolean() ? ProvidedIdFieldMapper.Defaults.FIELD_TYPE : TsidExtractingIdFieldMapper.FIELD_TYPE; } - public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSupplier) { + public static EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSupplier) { return new EngineConfig( config.getShardId(), config.getThreadPool(), From 3d38173e567d1dce31df6811ae5f8826f87dfe25 Mon Sep 17 00:00:00 2001 From: Marwane Chahoud Date: Tue, 31 Jan 2023 10:42:49 +0100 Subject: [PATCH 27/63] Add a section about token-based authentication (#93344) * Add a section about token-based authentication It took me a considerable time to figure out the syntax for a token-based authentication, and I said why not add it to the documentation * Update x-pack/docs/en/watcher/input/http.asciidoc * Update x-pack/docs/en/watcher/input/http.asciidoc --------- Co-authored-by: Abdon Pijpelink --- x-pack/docs/en/watcher/input/http.asciidoc | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/x-pack/docs/en/watcher/input/http.asciidoc b/x-pack/docs/en/watcher/input/http.asciidoc index 023884dd8e53f..ca1738b5952d2 100644 --- a/x-pack/docs/en/watcher/input/http.asciidoc +++ b/x-pack/docs/en/watcher/input/http.asciidoc @@ -139,6 +139,28 @@ 
http://openweathermap.org/appid[OpenWeatherMap] service: -------------------------------------------------- // NOTCONSOLE +===== Using token-based authentication + +You can also call an API using a `Bearer token` instead of basic authentication. The `request.headers` object contains the HTTP headers: + +[source,js] +-------------------------------------------------- +"input" : { + "http" : { + "request" : { + "url": "https://api.example.com/v1/something", + "headers": { + "authorization" : "Bearer ABCD1234...", + "content-type": "application/json" + # other headers params.. + }, + "connection_timeout": "30s" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + ==== Using templates The `http` input supports templating. You can use <> when From 71c280b85d1cd58d10ca6a50e4fd5cb923b0ad26 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 31 Jan 2023 11:37:31 +0100 Subject: [PATCH 28/63] Build role for remote access authentication (#93316) This PR adds support for building roles for remote_access authentication instances, under the new remote cluster security model. This change is stand-alone and not wired up to active code flows yet. A proof of concept in #92089 highlights how the model change in this PR fits into the broader context of the fulfilling cluster processing cross cluster requests. 
--- .../authc/RemoteAccessAuthentication.java | 3 + .../xpack/core/security/authc/Subject.java | 26 +++- .../authz/RoleDescriptorsIntersection.java | 4 + .../security/authz/store/RoleReference.java | 33 ++++ .../authz/store/RoleReferenceResolver.java | 5 + .../authc/AuthenticationTestHelper.java | 55 +++---- .../core/security/authc/SubjectTests.java | 79 ++++++++++ .../authz/store/RoleReferenceTests.java | 13 ++ .../authz/store/RoleDescriptorStore.java | 15 ++ .../authz/store/CompositeRolesStoreTests.java | 141 ++++++++++++++++++ 10 files changed, 346 insertions(+), 28 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java index 089dd2be2a77f..d6c50bf66bd15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RemoteAccessAuthentication.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.AbstractBytesReference; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -148,6 +149,8 @@ public Map copyWithRemoteAccessEntries(final Map } public static final class RoleDescriptorsBytes extends AbstractBytesReference { + + public static final RoleDescriptorsBytes EMPTY = new RoleDescriptorsBytes(new BytesArray("{}")); private final BytesReference rawBytes; public RoleDescriptorsBytes(BytesReference rawBytes) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java index 7329db1d17996..5ff7d9749d2b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Subject.java @@ -19,6 +19,8 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.User; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -105,8 +107,7 @@ public RoleReferenceIntersection getRoleReferenceIntersection(@Nullable Anonymou case SERVICE_ACCOUNT: return new RoleReferenceIntersection(new RoleReference.ServiceAccountRoleReference(user.principal())); case REMOTE_ACCESS: - assert false : "unsupported subject type: [" + type + "]"; - throw new UnsupportedOperationException("unsupported subject type: [" + type + "]"); + return buildRoleReferencesForRemoteAccess(); default: assert false : "unknown subject type: [" + type + "]"; throw new IllegalStateException("unknown subject type: [" + type + "]"); @@ -231,6 +232,27 @@ private RoleReferenceIntersection buildRoleReferencesForApiKey() { ); } + private RoleReferenceIntersection buildRoleReferencesForRemoteAccess() { + final List roleReferences = new ArrayList<>(4); + @SuppressWarnings("unchecked") + final List remoteAccessRoleDescriptorsBytes = (List< + RemoteAccessAuthentication.RoleDescriptorsBytes>) metadata.get(AuthenticationField.REMOTE_ACCESS_ROLE_DESCRIPTORS_KEY); + if (remoteAccessRoleDescriptorsBytes.isEmpty()) { + // If the remote access role descriptors are empty, the remote user has no privileges. 
We need to add an empty role to restrict + // access of the overall intersection accordingly + roleReferences.add(new RoleReference.RemoteAccessRoleReference(RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY)); + } else { + // TODO handle this once we support API keys as querying subjects + assert remoteAccessRoleDescriptorsBytes.size() == 1 + : "only a singleton list of remote access role descriptors bytes is supported"; + for (RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes : remoteAccessRoleDescriptorsBytes) { + roleReferences.add(new RoleReference.RemoteAccessRoleReference(roleDescriptorsBytes)); + } + } + roleReferences.addAll(buildRoleReferencesForApiKey().getRoleReferences()); + return new RoleReferenceIntersection(List.copyOf(roleReferences)); + } + private static boolean isEmptyRoleDescriptorsBytes(BytesReference roleDescriptorsBytes) { return roleDescriptorsBytes == null || (roleDescriptorsBytes.length() == 2 && "{}".equals(roleDescriptorsBytes.utf8ToString())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java index 16bf0a074c675..30139ae1b1dba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java @@ -26,6 +26,10 @@ public record RoleDescriptorsIntersection(Collection> roleDe public static RoleDescriptorsIntersection EMPTY = new RoleDescriptorsIntersection(Collections.emptyList()); + public RoleDescriptorsIntersection(RoleDescriptor roleDescriptor) { + this(List.of(Set.of(roleDescriptor))); + } + public RoleDescriptorsIntersection(StreamInput in) throws IOException { this(in.readImmutableList(inner -> inner.readSet(RoleDescriptor::new))); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java index 9ceeb724b4202..3a4a377713294 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReference.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.xpack.core.security.authc.RemoteAccessAuthentication; import java.util.HashSet; import java.util.List; @@ -116,6 +117,38 @@ public ApiKeyRoleType getRoleType() { } } + final class RemoteAccessRoleReference implements RoleReference { + + private final RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes; + private RoleKey id = null; + + public RemoteAccessRoleReference(RemoteAccessAuthentication.RoleDescriptorsBytes roleDescriptorsBytes) { + this.roleDescriptorsBytes = roleDescriptorsBytes; + } + + @Override + public RoleKey id() { + // Hashing can be expensive. memorize the result in case the method is called multiple times. 
+ if (id == null) { + final String roleDescriptorsHash = MessageDigests.toHexString( + MessageDigests.digest(roleDescriptorsBytes, MessageDigests.sha256()) + ); + id = new RoleKey(Set.of("remote_access:" + roleDescriptorsHash), "remote_access"); + } + return id; + } + + @Override + public void resolve(RoleReferenceResolver resolver, ActionListener listener) { + resolver.resolveRemoteAccessRoleReference(this, listener); + } + + public RemoteAccessAuthentication.RoleDescriptorsBytes getRoleDescriptorsBytes() { + return roleDescriptorsBytes; + } + + } + /** * Same as {@link ApiKeyRoleReference} but for BWC purpose (prior to v7.9.0) */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java index 44522b5884521..e39b26afdacad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceResolver.java @@ -25,4 +25,9 @@ void resolveBwcApiKeyRoleReference( ); void resolveServiceAccountRoleReference(ServiceAccountRoleReference roleReference, ActionListener listener); + + void resolveRemoteAccessRoleReference( + RoleReference.RemoteAccessRoleReference remoteAccessRoleReference, + ActionListener listener + ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java index 6dcf4c54ef98c..69f2b7d9ced1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java @@ -237,43 +237,46 @@ public static String 
randomInternalRoleName() { ); } - public static RemoteAccessAuthentication randomRemoteAccessAuthentication() { + public static RemoteAccessAuthentication randomRemoteAccessAuthentication(RoleDescriptorsIntersection roleDescriptorsIntersection) { try { // TODO add apikey() once we have querying-cluster-side API key support final Authentication authentication = ESTestCase.randomFrom( AuthenticationTestHelper.builder().realm(), AuthenticationTestHelper.builder().internal(SystemUser.INSTANCE) ).build(); - return new RemoteAccessAuthentication( - authentication, - new RoleDescriptorsIntersection( - List.of( - // TODO randomize to add a second set once we have querying-cluster-side API key support - Set.of( - new RoleDescriptor( - "a", - null, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder() - .indices("index1") - .privileges("read", "read_cross_cluster") - .build() }, - null, - null, - null, - null, - null, - null - ) - ) - ) - ) - ); + return new RemoteAccessAuthentication(authentication, roleDescriptorsIntersection); } catch (IOException e) { throw new UncheckedIOException(e); } } + public static RemoteAccessAuthentication randomRemoteAccessAuthentication() { + return randomRemoteAccessAuthentication( + new RoleDescriptorsIntersection( + List.of( + // TODO randomize to add a second set once we have querying-cluster-side API key support + Set.of( + new RoleDescriptor( + "_remote_user", + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("index1") + .privileges("read", "read_cross_cluster") + .build() }, + null, + null, + null, + null, + null, + null + ) + ) + ) + ) + ); + } + public static class AuthenticationTestBuilder { private TransportVersion transportVersion; private Authentication authenticatingAuthentication; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java index fefec2d24a7a9..7f575f29457b6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.xpack.core.security.authc.service.ServiceAccountSettings; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.authz.store.RoleReference.ApiKeyRoleReference; import org.elasticsearch.xpack.core.security.authz.store.RoleReference.BwcApiKeyRoleReference; @@ -35,7 +36,10 @@ import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_REALM_NAME; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_REALM_TYPE; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY; +import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.REMOTE_ACCESS_REALM_NAME; +import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.REMOTE_ACCESS_REALM_TYPE; import static org.elasticsearch.xpack.core.security.authc.Subject.FLEET_SERVER_ROLE_DESCRIPTOR_BYTES_V_7_14; +import static org.elasticsearch.xpack.core.security.authz.store.RoleReference.RemoteAccessRoleReference; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -145,6 +149,81 @@ public void testGetRoleReferencesForApiKey() { } } + public void testGetRoleReferencesForRemoteAccess() { + Map authMetadata = new HashMap<>(); + final String apiKeyId = randomAlphaOfLength(12); + 
authMetadata.put(AuthenticationField.API_KEY_ID_KEY, apiKeyId); + authMetadata.put(AuthenticationField.API_KEY_NAME_KEY, randomBoolean() ? null : randomAlphaOfLength(12)); + final BytesReference roleBytes = new BytesArray(""" + {"role":{"indices":[{"names":["index*"],"privileges":["read"]}]}}"""); + final BytesReference limitedByRoleBytes = new BytesArray(""" + {"limited-by-role":{"indices":[{"names":["*"],"privileges":["all"]}]}}"""); + + final boolean emptyRoleBytes = randomBoolean(); + + authMetadata.put( + AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, + emptyRoleBytes ? randomFrom(Arrays.asList(null, new BytesArray("{}"))) : roleBytes + ); + authMetadata.put(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, limitedByRoleBytes); + + final RemoteAccessAuthentication remoteAccessAuthentication = randomBoolean() + ? AuthenticationTestHelper.randomRemoteAccessAuthentication(RoleDescriptorsIntersection.EMPTY) + : AuthenticationTestHelper.randomRemoteAccessAuthentication(); + authMetadata = remoteAccessAuthentication.copyWithRemoteAccessEntries(authMetadata); + + final Subject subject = new Subject( + new User("joe"), + new Authentication.RealmRef(REMOTE_ACCESS_REALM_NAME, REMOTE_ACCESS_REALM_TYPE, "node"), + TransportVersion.CURRENT, + authMetadata + ); + + final RoleReferenceIntersection roleReferenceIntersection = subject.getRoleReferenceIntersection(getAnonymousUser()); + final List roleReferences = roleReferenceIntersection.getRoleReferences(); + if (emptyRoleBytes) { + assertThat(roleReferences, contains(isA(RemoteAccessRoleReference.class), isA(ApiKeyRoleReference.class))); + + final RemoteAccessRoleReference remoteAccessRoleReference = (RemoteAccessRoleReference) roleReferences.get(0); + assertThat( + remoteAccessRoleReference.getRoleDescriptorsBytes(), + equalTo( + remoteAccessAuthentication.getRoleDescriptorsBytesList().isEmpty() + ? 
RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY + : remoteAccessAuthentication.getRoleDescriptorsBytesList().get(0) + ) + ); + + final ApiKeyRoleReference roleReference = (ApiKeyRoleReference) roleReferences.get(1); + assertThat(roleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(roleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY))); + + } else { + assertThat( + roleReferences, + contains(isA(RemoteAccessRoleReference.class), isA(ApiKeyRoleReference.class), isA(ApiKeyRoleReference.class)) + ); + + final RemoteAccessRoleReference remoteAccessRoleReference = (RemoteAccessRoleReference) roleReferences.get(0); + assertThat( + remoteAccessRoleReference.getRoleDescriptorsBytes(), + equalTo( + remoteAccessAuthentication.getRoleDescriptorsBytesList().isEmpty() + ? RemoteAccessAuthentication.RoleDescriptorsBytes.EMPTY + : remoteAccessAuthentication.getRoleDescriptorsBytesList().get(0) + ) + ); + + final ApiKeyRoleReference roleReference = (ApiKeyRoleReference) roleReferences.get(1); + assertThat(roleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(roleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_ROLE_DESCRIPTORS_KEY))); + + final ApiKeyRoleReference limitedByRoleReference = (ApiKeyRoleReference) roleReferences.get(2); + assertThat(limitedByRoleReference.getApiKeyId(), equalTo(apiKeyId)); + assertThat(limitedByRoleReference.getRoleDescriptorsBytes(), equalTo(authMetadata.get(API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY))); + } + } + public void testGetRoleReferencesForApiKeyBwc() { Map authMetadata = new HashMap<>(); final String apiKeyId = randomAlphaOfLength(12); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java index 35ba3e171d6c5..c10b01f59c2ae 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/RoleReferenceTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RemoteAccessAuthentication; import java.util.Set; @@ -65,6 +66,18 @@ public void testApiKeyRoleReference() { assertThat(roleKey.getSource(), equalTo("apikey_" + apiKeyRoleType)); } + public void testRemoteAccessRoleReference() { + final var roleDescriptorsBytes = new RemoteAccessAuthentication.RoleDescriptorsBytes(new BytesArray(randomAlphaOfLength(50))); + final var remoteAccessRoleReference = new RoleReference.RemoteAccessRoleReference(roleDescriptorsBytes); + + final RoleKey roleKey = remoteAccessRoleReference.id(); + assertThat( + roleKey.getNames(), + hasItem("remote_access:" + MessageDigests.toHexString(MessageDigests.digest(roleDescriptorsBytes, MessageDigests.sha256()))) + ); + assertThat(roleKey.getSource(), equalTo("remote_access")); + } + public void testServiceAccountRoleReference() { final String principal = randomAlphaOfLength(8) + "/" + randomAlphaOfLength(8); final RoleReference.ServiceAccountRoleReference serviceAccountRoleReference = new RoleReference.ServiceAccountRoleReference( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java index 756849193fe6c..acae8a3e255f1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java @@ -132,6 +132,21 @@ public void 
resolveServiceAccountRoleReference( })); } + @Override + public void resolveRemoteAccessRoleReference( + RoleReference.RemoteAccessRoleReference remoteAccessRoleReference, + ActionListener listener + ) { + final Set roleDescriptors = remoteAccessRoleReference.getRoleDescriptorsBytes().toRoleDescriptors(); + if (roleDescriptors.isEmpty()) { + listener.onResponse(RolesRetrievalResult.EMPTY); + return; + } + final RolesRetrievalResult rolesRetrievalResult = new RolesRetrievalResult(); + rolesRetrievalResult.addDescriptors(Set.copyOf(roleDescriptors)); + listener.onResponse(rolesRetrievalResult); + } + private void resolveRoleNames(Set roleNames, ActionListener listener) { roleDescriptors(roleNames, ActionListener.wrap(rolesRetrievalResult -> { logDeprecatedRoles(rolesRetrievalResult.getRoleDescriptors()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index c7ad83832a7ee..b418740b1f01c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.index.IndexAction; @@ -65,6 +66,7 @@ import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import 
org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; @@ -102,6 +104,7 @@ import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.service.ServiceAccountService; +import org.elasticsearch.xpack.security.authz.RBACEngine; import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.BaseMatcher; @@ -1978,6 +1981,144 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws Exception { assertThat(role.names()[0], containsString("user_role_")); } + public void testGetRoleForRemoteAccessAuthentication() throws Exception { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + doCallRealMethod().when(fileRolesStore).accept(anySet(), anyActionListener()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doCallRealMethod().when(nativeRolesStore).accept(anySet(), anyActionListener()); + when(fileRolesStore.roleDescriptors(anySet())).thenReturn(Collections.emptySet()); + doAnswer((invocationOnMock) -> { + @SuppressWarnings("unchecked") + ActionListener callback = (ActionListener) invocationOnMock.getArguments()[1]; + callback.onResponse(RoleRetrievalResult.failure(new RuntimeException("intentionally failed!"))); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isASet(), anyActionListener()); + final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + ThreadContext threadContext = new 
ThreadContext(SECURITY_ENABLED_SETTINGS); + final ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(SECURITY_ENABLED_SETTINGS, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + ); + final ApiKeyService apiKeyService = spy( + new ApiKeyService( + SECURITY_ENABLED_SETTINGS, + Clock.systemUTC(), + mock(Client.class), + mock(SecurityIndexManager.class), + clusterService, + mock(CacheInvalidatorRegistry.class), + mock(ThreadPool.class) + ) + ); + final NativePrivilegeStore nativePrivStore = mock(NativePrivilegeStore.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener< + Collection>) invocationOnMock.getArguments()[2]; + listener.onResponse(Collections.emptyList()); + return Void.TYPE; + }).when(nativePrivStore).getPrivileges(anyCollection(), anyCollection(), anyActionListener()); + + final AtomicReference> effectiveRoleDescriptors = new AtomicReference>(); + final CompositeRolesStore compositeRolesStore = buildCompositeRolesStore( + SECURITY_ENABLED_SETTINGS, + fileRolesStore, + nativeRolesStore, + reservedRolesStore, + nativePrivStore, + null, + apiKeyService, + null, + null, + effectiveRoleDescriptors::set + ); + AuditUtil.getOrGenerateRequestId(threadContext); + final TransportVersion version = TransportVersion.CURRENT; + final String apiKeyRoleName = "user_role_" + randomAlphaOfLength(4); + final Authentication apiKeyAuthentication = createApiKeyAuthentication( + apiKeyService, + randomValueOtherThanMany( + authc -> authc.getAuthenticationType() == AuthenticationType.API_KEY, + () -> AuthenticationTestHelper.builder().build() + ), + Collections.singleton( + new RoleDescriptor( + apiKeyRoleName, + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("index*").privileges("all").build() }, + null + ) + ), + null, + version + ); + final boolean emptyRemoteRole = randomBoolean(); + final Authentication 
authentication = apiKeyAuthentication.toRemoteAccess( + AuthenticationTestHelper.randomRemoteAccessAuthentication( + emptyRemoteRole + ? RoleDescriptorsIntersection.EMPTY + : new RoleDescriptorsIntersection( + new RoleDescriptor( + RBACEngine.REMOTE_USER_ROLE_NAME, + null, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("index1").privileges("read").build() }, + null, + null, + null, + null, + null, + null + ) + ) + ) + ); + + final PlainActionFuture roleFuture = new PlainActionFuture<>(); + compositeRolesStore.getRole(authentication.getEffectiveSubject(), roleFuture); + final Role role = roleFuture.actionGet(); + assertThat(effectiveRoleDescriptors.get(), is(nullValue())); + + verify(apiKeyService, times(1)).parseRoleDescriptorsBytes(anyString(), any(BytesReference.class), any()); + assertThat(role.names().length, is(1)); + assertThat(role.names()[0], equalTo(apiKeyRoleName)); + + // Smoke-test for authorization + final Metadata indexMetadata = Metadata.builder() + .put( + IndexMetadata.builder("index1") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + ) + ) + .put( + IndexMetadata.builder("index2") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + ) + ) + .build(); + final var emptyCache = new FieldPermissionsCache(Settings.EMPTY); + assertThat( + role.authorize(SearchAction.NAME, Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false == emptyRemoteRole) + ); + assertThat( + role.authorize(CreateIndexAction.NAME, Sets.newHashSet("index1"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false) + ); + assertThat( + role.authorize(SearchAction.NAME, 
Sets.newHashSet("index2"), indexMetadata.getIndicesLookup(), emptyCache).isGranted(), + is(false) + ); + } + public void testGetRolesForRunAs() { final ApiKeyService apiKeyService = mock(ApiKeyService.class); final ServiceAccountService serviceAccountService = mock(ServiceAccountService.class); From 0e87d582662bdc924f11ec299dec06baba9d6c64 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 31 Jan 2023 11:48:03 +0100 Subject: [PATCH 29/63] Cleanup allocation commands test (#93368) --- .../allocation/AllocationCommandsTests.java | 92 ++++--------------- .../cluster/ESAllocationTestCase.java | 4 - 2 files changed, 17 insertions(+), 79 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 19c2732c0867a..5785e040c616f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -59,8 +58,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; -import static org.elasticsearch.cluster.ClusterModule.BALANCED_ALLOCATOR; -import static org.elasticsearch.cluster.ClusterModule.DESIRED_BALANCE_ALLOCATOR; import static org.elasticsearch.cluster.routing.RoutingNodesHelper.shardsWithState; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -77,10 +74,7 @@ public class 
AllocationCommandsTests extends ESAllocationTestCase { public void testMoveShardCommand() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); logger.info("creating an index with 1 shard, no replica"); @@ -144,7 +138,6 @@ public void testAllocateCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); final String index = "test"; @@ -363,7 +356,6 @@ public void testAllocateStalePrimaryCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); final String index = "test"; @@ -427,7 +419,6 @@ public void testCancelCommand() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); @@ -724,32 +715,7 @@ public void testSerialization() throws Exception { in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); // Now we can read them! 
- AllocationCommands sCommands = AllocationCommands.readFrom(in); - - assertThat(sCommands.commands().size(), equalTo(5)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(1)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).index(), equalTo("test")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(2)); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).index(), equalTo("test")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).node(), equalTo("node1")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(2)); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).index(), equalTo("test")); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node1")); - - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).shardId(), equalTo(3)); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).index(), equalTo("test")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).fromNode(), equalTo("node2")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).toNode(), equalTo("node3")); - - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).shardId(), equalTo(4)); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).index(), equalTo("test")); - assertThat(((CancelAllocationCommand) 
(sCommands.commands().get(4))).node(), equalTo("node5")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).allowPrimary(), equalTo(true)); + assertThat(AllocationCommands.readFrom(in), equalTo(commands)); } public void testXContent() throws Exception { @@ -802,32 +768,19 @@ public void testXContent() throws Exception { // move two tokens, parser expected to be "on" `commands` field parser.nextToken(); parser.nextToken(); - AllocationCommands sCommands = AllocationCommands.fromXContent(parser); - - assertThat(sCommands.commands().size(), equalTo(5)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(1)); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).index(), equalTo("test")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1")); - assertThat(((AllocateEmptyPrimaryAllocationCommand) (sCommands.commands().get(0))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(2)); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).index(), equalTo("test")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).node(), equalTo("node1")); - assertThat(((AllocateStalePrimaryAllocationCommand) (sCommands.commands().get(1))).acceptDataLoss(), equalTo(true)); - - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(2)); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).index(), equalTo("test")); - assertThat(((AllocateReplicaAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node1")); - - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).shardId(), equalTo(3)); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).index(), equalTo("test")); - 
assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).fromNode(), equalTo("node2")); - assertThat(((MoveAllocationCommand) (sCommands.commands().get(3))).toNode(), equalTo("node3")); - - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).shardId(), equalTo(4)); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).index(), equalTo("test")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).node(), equalTo("node5")); - assertThat(((CancelAllocationCommand) (sCommands.commands().get(4))).allowPrimary(), equalTo(true)); + + assertThat( + AllocationCommands.fromXContent(parser), + equalTo( + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), + new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), + new AllocateReplicaAllocationCommand("test", 2, "node1"), + new MoveAllocationCommand("test", 3, "node2", "node3"), + new CancelAllocationCommand("test", 4, "node5", true) + ) + ) + ); } @Override @@ -837,10 +790,7 @@ protected NamedXContentRegistry xContentRegistry() { public void testMoveShardToNonDataNode() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); logger.info("creating an index with 1 shard, no replica"); @@ -910,10 +860,7 @@ public void testMoveShardToNonDataNode() { public void testMoveShardFromNonDataNode() { AllocationService allocation = createAllocationService( - Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) - .build() + Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build() ); 
logger.info("creating an index with 1 shard, no replica"); @@ -985,7 +932,6 @@ public void testConflictingCommandsInSingleRequest() { Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") - .put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), randomShardsAllocator()) .build() ); @@ -1091,8 +1037,4 @@ public void testConflictingCommandsInSingleRequest() { ); }).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead")); } - - private static String randomShardsAllocator() { - return randomFrom(BALANCED_ALLOCATOR, DESIRED_BALANCE_ALLOCATOR); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 6c294502de5cc..18a611e39b5a2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -57,10 +57,6 @@ import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; public abstract class ESAllocationTestCase extends ESTestCase { - private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings( - Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS - ); public static final SnapshotsInfoService SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES = () -> new SnapshotShardSizeInfo(Map.of()) { @Override From 4dd3b9a44ddcc8cf0d062f7c9958670deadf2ff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Tue, 31 Jan 2023 11:59:20 +0100 Subject: [PATCH 30/63] Link to the time-units doc in S3 repository docs instead of explaining it in words (#93351) --- .../snapshot-restore/repository-s3.asciidoc | 13 ++++++------- 1 file changed, 6 
insertions(+), 7 deletions(-) diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 4ead755c409e5..de72511010b51 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -143,11 +143,9 @@ settings belong in the `elasticsearch.yml` file. `read_timeout`:: - The maximum time {es} will wait to receive the next byte of data over an established, - open connection to the repository before it closes the connection. The value should - specify the unit. - For example, a value of `5s` specifies a 5 second timeout. The default value - is 50 seconds. + (<>) The maximum time {es} will wait to receive the next byte + of data over an established, open connection to the repository before it closes the + connection. The default value is 50 seconds. `max_retries`:: @@ -285,7 +283,7 @@ multiple deployments may share the same bucket. `chunk_size`:: - Big files can be broken down into chunks during snapshotting if needed. + (<>) Big files can be broken down into chunks during snapshotting if needed. Specify the chunk size as a value and unit, for example: `1TB`, `1GB`, `10MB`. Defaults to the maximum size of a blob in the S3 which is `5TB`. @@ -304,7 +302,8 @@ include::repository-shared-settings.asciidoc[] `buffer_size`:: - Minimum threshold below which the chunk is uploaded using a single request. + (<>) Minimum threshold below which the chunk is + uploaded using a single request. 
Beyond this threshold, the S3 repository will use the https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] to split the chunk into several parts, each of From 4fb06b2f35fa27b871616d04a32fd2a043c36570 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 31 Jan 2023 11:24:32 +0000 Subject: [PATCH 31/63] Migrate misc packages to TransportVersion (#93272) --- .../AutoDateHistogramAggregationBuilder.java | 5 ++-- .../histogram/InternalAutoDateHistogram.java | 6 ++--- .../metric/MatrixStatsAggregationBuilder.java | 5 ++-- .../ingest/common/GrokProcessorGetAction.java | 6 ++--- .../stats/GeoIpDownloaderStatsAction.java | 6 ++--- .../mustache/MultiSearchTemplateResponse.java | 6 ++--- .../percolator/PercolateQueryBuilder.java | 8 +++--- .../upgrades/DesiredNodesUpgradeIT.java | 4 +-- .../main/java/org/elasticsearch/Build.java | 4 +-- .../TransportUpdateDesiredNodesAction.java | 2 +- .../UpdateDesiredNodesRequest.java | 3 +-- .../cluster/node/info/PluginsAndModules.java | 4 +-- .../admin/indices/rollover/Condition.java | 4 +-- .../MaxPrimaryShardDocsCondition.java | 6 ++--- .../indices/rollover/MinAgeCondition.java | 6 ++--- .../indices/rollover/MinDocsCondition.java | 6 ++--- .../MinPrimaryShardDocsCondition.java | 6 ++--- .../MinPrimaryShardSizeCondition.java | 6 ++--- .../indices/rollover/MinSizeCondition.java | 6 ++--- .../indices/rollover/RolloverRequest.java | 2 +- .../action/bulk/BulkItemResponse.java | 3 +-- .../action/explain/ExplainRequest.java | 3 +-- .../action/explain/ExplainResponse.java | 3 +-- .../coordination/JoinValidationService.java | 2 +- .../PublicationTransportHandler.java | 25 +++++++++--------- .../cluster/metadata/DesiredNode.java | 9 ++++--- .../cluster/metadata/Metadata.java | 2 +- .../cluster/metadata/RepositoryMetadata.java | 4 +-- .../cluster/node/DiscoveryNode.java | 7 ++--- .../cluster/routing/ShardRouting.java | 2 +- .../common/compress/CompressedXContent.java | 6 ++--- 
.../common/document/DocumentField.java | 10 +++---- .../common/io/stream/DelayableWriteable.java | 26 +++++++++---------- .../io/stream/RecyclerBytesStreamOutput.java | 2 +- .../common/io/stream/StreamInput.java | 4 +-- .../common/io/stream/StreamOutput.java | 4 +-- .../elasticsearch/common/unit/Processors.java | 16 ++++++------ .../common/xcontent/XContentHelper.java | 4 +-- .../discovery/DiscoveryStats.java | 10 +++---- .../gateway/LocalAllocateDangledIndices.java | 5 ++-- ...ransportNodesListGatewayStartedShards.java | 10 +++---- .../health/metadata/HealthMetadata.java | 9 +++---- .../index/translog/Translog.java | 10 ++++--- .../recovery/RecoverySnapshotFileRequest.java | 4 +-- .../elasticsearch/monitor/jvm/JvmInfo.java | 6 ++--- .../org/elasticsearch/monitor/os/OsInfo.java | 8 +++--- .../org/elasticsearch/monitor/os/OsStats.java | 6 ++--- .../plugins/PluginDescriptor.java | 23 ++++++++-------- .../plugins/PluginRuntimeInfo.java | 8 +++--- .../script/ScriptContextStats.java | 8 +++--- .../elasticsearch/script/ScriptException.java | 6 ++--- .../org/elasticsearch/script/ScriptStats.java | 6 ++--- .../org/elasticsearch/script/TimeSeries.java | 6 ++--- .../tasks/TaskCancellationService.java | 6 ++--- .../transport/ActionTransportException.java | 6 ++--- .../transport/BytesTransportRequest.java | 10 +++---- .../transport/ConnectTransportException.java | 6 ++--- .../transport/ProxyConnectionStrategy.java | 5 ++-- .../transport/RemoteConnectionInfo.java | 6 ++--- .../transport/TransportStats.java | 5 ++-- .../action/OriginalIndicesTests.java | 10 +++---- .../GetStoredScriptRequestTests.java | 6 ++--- .../TransportResolveIndexActionTests.java | 12 ++++++--- ...TransportFieldCapabilitiesActionTests.java | 9 ++++--- .../action/get/MultiGetShardRequestTests.java | 14 +++++----- .../PublicationTransportHandlerTests.java | 2 +- .../cluster/metadata/DesiredNodeTests.java | 13 +++++----- .../io/stream/DelayableWriteableTests.java | 25 ++++++++++-------- 
.../reindex/BulkByScrollResponseTests.java | 4 +-- .../reindex/BulkByScrollTaskStatusTests.java | 6 ++--- .../license/GetFeatureUsageResponse.java | 10 +++---- .../protocol/xpack/XPackInfoRequest.java | 10 +++---- .../protocol/xpack/XPackInfoResponse.java | 10 +++---- .../xpack/graph/GraphExploreRequest.java | 6 ++--- .../ilm/IndexLifecycleExplainResponse.java | 3 +-- .../ilm/IndexLifecycleFeatureSetUsage.java | 5 ++-- .../xpack/core/ilm/RolloverAction.java | 5 ++-- .../xpack/core/indexing/IndexerJobStats.java | 3 +-- .../pivot/DateHistogramGroupSource.java | 6 ++--- .../action/MonitoringBulkRequestTests.java | 2 +- .../blobstore/testkit/BlobAnalyzeAction.java | 8 +++--- .../testkit/RepositoryAnalyzeAction.java | 10 ++++--- 82 files changed, 298 insertions(+), 283 deletions(-) diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 3c1742c2af77a..cac35ce644bf7 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -9,7 +9,6 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -123,7 +122,7 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { 
minimumIntervalExpression = in.readOptionalString(); } } @@ -131,7 +130,7 @@ public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { out.writeOptionalString(minimumIntervalExpression); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 4860bedaee61a..c91a6bed8a716 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -226,7 +226,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); buckets = in.readList(stream -> new Bucket(stream, format)); this.targetBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { bucketInnerInterval = in.readVLong(); } else { bucketInnerInterval = 1; // Calculated on merge. 
@@ -239,7 +239,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeList(buckets); out.writeVInt(targetBuckets); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeVLong(bucketInnerInterval); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java index ee45a32dd8501..d8edb19c2782b 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java @@ -8,7 +8,6 @@ package org.elasticsearch.aggregations.metric; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.MultiValueMode; @@ -56,14 +55,14 @@ public boolean supportsSampling() { */ public MatrixStatsAggregationBuilder(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { multiValueMode = MultiValueMode.readMultiValueModeFrom(in); } } @Override protected void innerWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { multiValueMode.writeTo(out); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index ad00956d2dde7..a13b7d21bc115 100644 --- 
a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -57,7 +57,7 @@ public Request(boolean sorted, String ecsCompatibility) { Request(StreamInput in) throws IOException { super(in); this.sorted = in.readBoolean(); - this.ecsCompatibility = in.getVersion().onOrAfter(Version.V_8_0_0) + this.ecsCompatibility = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readString() : GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE; } @@ -71,7 +71,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(sorted); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(ecsCompatibility); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java index a160dfeec9b4a..228758e886c69 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import 
org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -166,7 +166,7 @@ protected NodeResponse(StreamInput in) throws IOException { stats = in.readBoolean() ? new GeoIpDownloaderStats(in) : null; databases = in.readSet(StreamInput::readString); filesInTemp = in.readSet(StreamInput::readString); - configDatabases = in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readSet(StreamInput::readString) : null; + configDatabases = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readSet(StreamInput::readString) : null; } protected NodeResponse( @@ -208,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(databases, StreamOutput::writeString); out.writeCollection(filesInTemp, StreamOutput::writeString); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeCollection(configDatabases, StreamOutput::writeString); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 5aa962973b6f8..f426480155356 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; @@ -102,7 +102,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + 
if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { tookInMillis = in.readVLong(); } else { tookInMillis = -1L; @@ -136,7 +136,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index ec924eef6184d..f2d06e2b72d47 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -217,12 +217,12 @@ protected PercolateQueryBuilder(String field, Supplier documentS super(in); field = in.readString(); name = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String documentType = in.readOptionalString(); assert documentType == null; } indexedDocumentIndex = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String indexedDocumentType = in.readOptionalString(); assert indexedDocumentType == null; } @@ -259,12 +259,12 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeString(field); out.writeOptionalString(name); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null documentType values out.writeOptionalString(null); } out.writeOptionalString(indexedDocumentIndex); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // 
In 7x, typeless percolate queries are represented by null indexedDocumentType values out.writeOptionalString(null); } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0c42429f71c5..2cbfe030be09c 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -43,9 +43,9 @@ public void testUpgradeDesiredNodes() throws Exception { return; } - if (UPGRADE_FROM_VERSION.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (UPGRADE_FROM_VERSION.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + } else if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9cddfd504a0e5..7279e0c4aa4c1 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -151,7 +151,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final Type type; // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - if (in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now in.readString(); } @@ -167,7 +167,7 @@ public static Build 
readBuild(StreamInput in) throws IOException { } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now out.writeString("default"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index 19e5762b0a72f..e1fcf41de5ebc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -95,7 +95,7 @@ protected void masterOperation( @Override protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionListener listener) { final var minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); - if (request.isCompatibleWithVersion(minNodeVersion) == false) { + if (request.isCompatibleWithVersion(minNodeVersion.transportVersion) == false) { listener.onFailure( new IllegalArgumentException( "Unable to use processor ranges, floating-point (with greater precision) processors " diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index fa95c4a7df69e..825db3c31a998 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.TransportVersion; -import 
org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -99,7 +98,7 @@ public boolean isDryRun() { return dryRun; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java index 974e90b11d8dd..e6a91b152cad5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.ReportingService; @@ -41,7 +41,7 @@ public PluginsAndModules(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeList(plugins); } else { out.writeList(plugins.stream().map(PluginRuntimeInfo::descriptor).toList()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index b61e73bbfa26b..ba7d6b03043c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentFragment; @@ -41,7 +41,7 @@ protected Condition(String name, Type type) { * Checks if this condition is available in a specific version. * This makes sure BWC when introducing a new condition which is not recognized by older versions. */ - boolean includedInVersion(Version version) { + boolean includedInVersion(TransportVersion version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java index c27b4a7b7e739..678ec96c217ca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MaxPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_2_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_2_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java index ddcfadd53dd74..98958d3b015c7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -64,7 +64,7 @@ public static MinAgeCondition fromXContent(XContentParser parser) throws IOExcep } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java index 9a4fffc17018f..8c6274cfadb81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinDocsCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return 
version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java index e1aee305742f3..6aaea57e5b55b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java index 5ec8d26d9672a..d7149e2a91be4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static 
MinPrimaryShardSizeCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java index 82cf3c0daf301..52db7ff90cf26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static MinSizeCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 9916acbef125f..fd773a9e19b58 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -212,7 +212,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(newIndexName); out.writeBoolean(dryRun); out.writeCollection( - conditions.values().stream().filter(c -> 
c.includedInVersion(out.getVersion())).toList(), + conditions.values().stream().filter(c -> c.includedInVersion(out.getTransportVersion())).toList(), StreamOutput::writeNamedWriteable ); createIndexRequest.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 1f140be7522ad..b5894d322b90e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -254,7 +253,7 @@ public Failure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index 43a00accdb998..685eb0b8a1995 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.explain; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; @@ -161,7 
+160,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 880ed44db4460..97c56069fa762 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -119,7 +118,7 @@ public RestStatus status() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index f2dde93f16e84..ecadc771ebdaf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -308,7 +308,7 @@ protected void doRun() throws Exception { transportService.sendRequest( discoveryNode, JOIN_VALIDATE_ACTION_NAME, - new BytesTransportRequest(bytes, discoveryNode.getVersion()), + new BytesTransportRequest(bytes, 
discoveryNode.getVersion().transportVersion), REQUEST_OPTIONS, new CleanableResponseHandler<>( listener, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 526eac3f2687d..4d6b4ce1edd07 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -85,7 +86,7 @@ public class PublicationTransportHandler { TransportRequestOptions.Type.STATE ); - public static final Version INCLUDES_LAST_COMMITTED_DATA_VERSION = Version.V_8_6_0; + public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersion.V_8_6_0; private final SerializationStatsTracker serializationStatsTracker = new SerializationStatsTracker(); @@ -125,7 +126,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); } in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); - in.setVersion(request.version()); + in.setTransportVersion(request.version()); // If true we received full cluster state - otherwise diffs if (in.readBoolean()) { final ClusterState incomingState; @@ -226,7 +227,7 @@ public PublicationContext newPublicationContext(ClusterStatePublicationEvent clu } private ReleasableBytesReference serializeFullClusterState(ClusterState clusterState, DiscoveryNode node) { - final Version nodeVersion = node.getVersion(); + final TransportVersion 
serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -236,7 +237,7 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(true); clusterState.writeTo(stream); uncompressedBytes = stream.position(); @@ -246,9 +247,9 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedFullState(uncompressedBytes, result.length()); logger.trace( - "serialized full cluster state version [{}] for node version [{}] with size [{}]", + "serialized full cluster state version [{}] using transport version [{}] with size [{}]", clusterState.version(), - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -262,7 +263,7 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS private ReleasableBytesReference serializeDiffClusterState(ClusterState newState, Diff diff, DiscoveryNode node) { final long clusterStateVersion = newState.version(); - final Version nodeVersion = node.getVersion(); + final TransportVersion serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -272,10 +273,10 @@ private ReleasableBytesReference serializeDiffClusterState(ClusterState newState CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(false); diff.writeTo(stream); - if 
(nodeVersion.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) { + if (serializeVersion.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) { stream.writeBoolean(newState.metadata().clusterUUIDCommitted()); newState.getLastCommittedConfiguration().writeTo(stream); } @@ -286,9 +287,9 @@ private ReleasableBytesReference serializeDiffClusterState(ClusterState newState final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedDiff(uncompressedBytes, result.length()); logger.trace( - "serialized cluster state diff for version [{}] for node version [{}] with size [{}]", + "serialized cluster state diff for version [{}] using transport version [{}] with size [{}]", clusterStateVersion, - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -466,7 +467,7 @@ private void sendClusterState( transportService.sendChildRequest( destination, PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(bytes, destination.getVersion()), + new BytesTransportRequest(bytes, destination.getVersion().transportVersion), task, STATE_REQUEST_OPTIONS, new CleanableResponseHandler<>(listener, PublishWithJoinResponse::new, ThreadPool.Names.CLUSTER_COORDINATION, bytes::decRef) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index 32b060cc9682a..2c29df2e661c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -38,7 +39,7 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public final class DesiredNode implements 
Writeable, ToXContentObject, Comparable { - public static final Version RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; + public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; private static final ParseField SETTINGS_FIELD = new ParseField("settings"); private static final ParseField PROCESSORS_FIELD = new ParseField("processors"); @@ -173,7 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { final var settings = Settings.readSettingsFromStream(in); final Processors processors; final ProcessorsRange processorsRange; - if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) { + if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { processors = in.readOptionalWriteable(Processors::readFrom); processorsRange = in.readOptionalWriteable(ProcessorsRange::readFrom); } else { @@ -189,7 +190,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { settings.writeTo(out); - if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION.transportVersion)) { + if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { out.writeOptionalWriteable(processors); out.writeOptionalWriteable(processorsRange); } else { @@ -296,7 +297,7 @@ public Set getRoles() { return roles; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 06f4b29b681b3..4d19109738a88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1446,7 +1446,7 @@ private static class MetadataDiff implements Diff { private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersion.V_8_5_0; private static final TransportVersion NOOP_METADATA_DIFF_SAFE_VERSION = - PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION.transportVersion; + PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION; private final long version; private final String clusterUUID; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java index 7790e265220ae..66e339a474ed7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java @@ -129,7 +129,7 @@ public long pendingGeneration() { public RepositoryMetadata(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (in.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { uuid = in.readString(); } else { uuid = RepositoryData.MISSING_UUID; @@ -148,7 +148,7 @@ public RepositoryMetadata(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (out.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { out.writeString(uuid); } out.writeString(type); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 68bb027245861..4db38a81c5f3c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java 
+++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -63,7 +64,7 @@ public static boolean isStateless(final Settings settings) { } static final String COORDINATING_ONLY = "coordinating_only"; - public static final Version EXTERNAL_ID_VERSION = Version.V_8_3_0; + public static final TransportVersion EXTERNAL_ID_VERSION = TransportVersion.V_8_3_0; public static final Comparator DISCOVERY_NODE_COMPARATOR = Comparator.comparing(DiscoveryNode::getName) .thenComparing(DiscoveryNode::getId); @@ -411,7 +412,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } this.roles = Collections.unmodifiableSortedSet(roles); this.version = Version.readVersion(in); - if (in.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (in.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { this.externalId = readStringLiteral.read(in); } else { this.externalId = nodeName; @@ -444,7 +445,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeBoolean(role.canContainData()); }); Version.writeVersion(version, out); - if (out.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (out.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { out.writeString(externalId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 2e993cc120c18..37ae9784d9cfa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -393,7 +393,7 @@ public void writeToThin(StreamOutput out) throws IOException { role.writeTo(out); } else if (role != Role.DEFAULT) { throw new IllegalStateException( - Strings.format("cannot send role [%s] to 
node of version [%s]", role, out.getTransportVersion()) + Strings.format("cannot send role [%s] with transport version [%s]", role, out.getTransportVersion()) ); } } diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index b6b69a9d4d22a..2de39744e40a7 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,7 +9,7 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; @@ -209,7 +209,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -221,7 +221,7 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index ec6f378c4c07c..5828b485ce36d 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ 
b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.document; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -44,12 +44,12 @@ public class DocumentField implements Writeable, Iterable { public DocumentField(StreamInput in) throws IOException { name = in.readString(); values = in.readList(StreamInput::readGenericValue); - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { ignoredValues = in.readList(StreamInput::readGenericValue); } else { ignoredValues = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { lookupFields = in.readList(LookupField::new); } else { lookupFields = List.of(); @@ -114,10 +114,10 @@ public List getIgnoredValues() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeCollection(values, StreamOutput::writeGenericValue); - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeCollection(ignoredValues, StreamOutput::writeGenericValue); } - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeList(lookupFields); } else { if (lookupFields.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java index 5c637f27dfd19..8ec408c2b08c3 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java +++ 
b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.Releasable; @@ -49,12 +49,12 @@ public static DelayableWriteable referencing(T referenc * when {@link #expand()} is called. */ public static DelayableWriteable delayed(Writeable.Reader reader, StreamInput in) throws IOException { - return new Serialized<>(reader, in.getVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); + return new Serialized<>(reader, in.getTransportVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); } public static DelayableWriteable referencing(Writeable.Reader reader, StreamInput in) throws IOException { try (ReleasableBytesReference serialized = in.readReleasableBytesReference()) { - return new Referencing<>(deserialize(reader, in.getVersion(), in.namedWriteableRegistry(), serialized)); + return new Referencing<>(deserialize(reader, in.getTransportVersion(), in.namedWriteableRegistry(), serialized)); } } @@ -103,12 +103,12 @@ public T expand() { public Serialized asSerialized(Reader reader, NamedWriteableRegistry registry) { BytesStreamOutput buffer; try { - buffer = writeToBuffer(Version.CURRENT); + buffer = writeToBuffer(TransportVersion.CURRENT); } catch (IOException e) { throw new RuntimeException("unexpected error writing writeable to buffer", e); } // TODO: this path is currently not used in production code, if it ever is this should start using pooled buffers - return new Serialized<>(reader, Version.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); + return new Serialized<>(reader, TransportVersion.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); } @Override @@ -121,9 +121,9 @@ public long 
getSerializedSize() { return DelayableWriteable.getSerializedSize(reference); } - private BytesStreamOutput writeToBuffer(Version version) throws IOException { + private BytesStreamOutput writeToBuffer(TransportVersion version) throws IOException { try (BytesStreamOutput buffer = new BytesStreamOutput()) { - buffer.setVersion(version); + buffer.setTransportVersion(version); reference.writeTo(buffer); return buffer; } @@ -141,13 +141,13 @@ public void close() { */ public static class Serialized extends DelayableWriteable { private final Writeable.Reader reader; - private final Version serializedAtVersion; + private final TransportVersion serializedAtVersion; private final NamedWriteableRegistry registry; private final ReleasableBytesReference serialized; private Serialized( Writeable.Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, ReleasableBytesReference serialized ) { @@ -159,7 +159,7 @@ private Serialized( @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion() == serializedAtVersion) { + if (out.getTransportVersion() == serializedAtVersion) { /* * If the version *does* line up we can just copy the bytes * which is good because this is how shard request caching @@ -214,7 +214,7 @@ public void close() { */ public static long getSerializedSize(Writeable ref) { try (CountingStreamOutput out = new CountingStreamOutput()) { - out.setVersion(Version.CURRENT); + out.setTransportVersion(TransportVersion.CURRENT); ref.writeTo(out); return out.size; } catch (IOException exc) { @@ -224,7 +224,7 @@ public static long getSerializedSize(Writeable ref) { private static T deserialize( Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, BytesReference serialized ) throws IOException { @@ -233,7 +233,7 @@ private static T deserialize( ? 
serialized.streamInput() : new NamedWriteableAwareStreamInput(serialized.streamInput(), registry) ) { - in.setVersion(serializedAtVersion); + in.setTransportVersion(serializedAtVersion); return reader.read(in); } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java index 7c372f6b52bff..4ebebdbd8e9bb 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java @@ -127,7 +127,7 @@ public void writeWithSizePrefix(Writeable writeable) throws IOException { // manipulation of the offsets on the pages after writing to tmp. This will require adjustments to the places in this class // that make assumptions about the page size try (RecyclerBytesStreamOutput tmp = new RecyclerBytesStreamOutput(recycler)) { - tmp.setVersion(getVersion()); + tmp.setTransportVersion(getTransportVersion()); writeable.writeTo(tmp); int size = tmp.size(); writeVInt(size); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index cab1e3611295b..faea2ad8bc864 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -769,10 +769,10 @@ public Object readGenericValue() throws IOException { case 6 -> readByteArray(); case 7 -> readArrayList(); case 8 -> readArray(); - case 9 -> getVersion().onOrAfter(Version.V_8_7_0) + case 9 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) ? 
readOrderedMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readOrderedMap(StreamInput::readString, StreamInput::readGenericValue); - case 10 -> getVersion().onOrAfter(Version.V_8_7_0) + case 10 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) ? readMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readMap(StreamInput::readString, StreamInput::readGenericValue); case 11 -> readByte(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index f69a229c901f4..19b792f332541 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -591,7 +591,7 @@ public void writeMapWithConsistentOrder(@Nullable Map .iterator(); while (iterator.hasNext()) { Map.Entry next = iterator.next(); - if (this.getVersion().onOrAfter(Version.V_8_7_0)) { + if (this.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { this.writeGenericValue(next.getKey()); } else { this.writeString(next.getKey()); @@ -722,7 +722,7 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep } else { o.writeByte((byte) 10); } - if (o.getVersion().onOrAfter(Version.V_8_7_0)) { + if (o.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { final Map map = (Map) v; o.writeMap(map, StreamOutput::writeGenericValue, StreamOutput::writeGenericValue); } else { diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index b95bdd3615f3c..89db778266eae 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -29,8 +29,8 @@ public class Processors implements Writeable, Comparable, ToXContent public static final Processors ZERO = new Processors(0.0); public static final Processors MAX_PROCESSORS = new Processors(Double.MAX_VALUE); - public static final Version FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; - public static final Version DOUBLE_PROCESSORS_SUPPORT_VERSION = Version.V_8_5_0; + public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_5_0; static final int NUMBER_OF_DECIMAL_PLACES = 5; private static final double MIN_REPRESENTABLE_PROCESSORS = 1E-5; @@ -63,9 +63,9 @@ public static Processors of(Double count) { public static Processors readFrom(StreamInput in) throws IOException { final double processorCount; - if (in.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (in.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readInt(); - } else if (in.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (in.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readFloat(); } else { processorCount = in.readDouble(); @@ -75,10 +75,10 @@ public static Processors readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (out.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { assert hasDecimals() == false; out.writeInt((int) count); - } else if (out.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (out.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { 
out.writeFloat((float) count); } else { out.writeDouble(count); @@ -143,7 +143,7 @@ private boolean hasDecimals() { return ((int) count) != Math.ceil(count); } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index a71f4fe70f324..fda5055e5585a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -594,7 +594,7 @@ public static BytesReference childBytes(XContentParser parser) throws IOExceptio * @param xContentType an instance to serialize */ public static void writeTo(StreamOutput out, XContentType xContentType) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // when sending an enumeration to { public Request(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -222,7 +222,7 @@ public String getCustomDataPath() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeString(customDataPath); } } @@ -262,7 +262,7 @@ public static class NodeRequest extends TransportRequest { public NodeRequest(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -278,7 +278,7 @@ public NodeRequest(Request request) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { assert customDataPath != null; out.writeString(customDataPath); } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 4a20913c094c7..859a4fc2a8c15 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -9,7 +9,6 @@ package org.elasticsearch.health.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -117,7 +116,7 @@ public record Disk( ) implements ToXContentFragment, Writeable { public static final String TYPE = "disk"; - public static Version VERSION_SUPPORTING_HEADROOM_FIELDS = Version.V_8_5_0; + public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersion.V_8_5_0; private static final ParseField HIGH_WATERMARK_FIELD = new ParseField("high_watermark"); private static final ParseField HIGH_MAX_HEADROOM_FIELD = new ParseField("high_max_headroom"); @@ -140,10 +139,10 @@ static Disk 
readFrom(StreamInput in) throws IOException { FROZEN_FLOOD_STAGE_WATERMARK_FIELD.getPreferredName() ); ByteSizeValue frozenFloodStageMaxHeadroom = ByteSizeValue.readFrom(in); - ByteSizeValue highMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue highMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; - ByteSizeValue floodStageMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue floodStageMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; return new Disk( @@ -162,7 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(describeFloodStageWatermark()); out.writeString(describeFrozenFloodStageWatermark()); frozenFloodStageMaxHeadroom.writeTo(out); - if (out.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { + if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { highMaxHeadroom.writeTo(out); floodStageMaxHeadroom.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index daf29a95c15a6..8eba045ee3cc8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.translog; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -1240,7 +1240,9 @@ public long version() { } private void write(final StreamOutput out) throws IOException { - final int format = 
out.getVersion().onOrAfter(Version.V_8_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; + final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + ? SERIALIZATION_FORMAT + : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); out.writeString(id); if (format < FORMAT_NO_DOC_TYPE) { @@ -1401,7 +1403,9 @@ public BytesReference source() { } private void write(final StreamOutput out) throws IOException { - final int format = out.getVersion().onOrAfter(Version.V_8_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; + final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + ? SERIALIZATION_FORMAT + : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); if (format < FORMAT_NO_DOC_TYPE) { out.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java index 2f2558eab3d82..47cf4ef4824ee 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySnapshotFileRequest.java @@ -50,8 +50,8 @@ public RecoverySnapshotFileRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().onOrAfter(RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION) - : "Unexpected serialization version " + out.getVersion(); + assert out.getTransportVersion().onOrAfter(RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION.transportVersion) + : "Unexpected serialization version " + out.getTransportVersion(); super.writeTo(out); out.writeLong(recoveryId); shardId.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index c4b450946adc0..f928f83531aa0 100644 --- 
a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -9,7 +9,7 @@ package org.elasticsearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -268,7 +268,7 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // Before 8.0 the no-jdk distributions could have bundledJdk false, this is always true now. in.readBoolean(); } @@ -302,7 +302,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getVersion().before(Version.V_8_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_3_0)) { out.writeBoolean(true); } out.writeOptionalBoolean(usingBundledJdk); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index a7be10d87308f..d956ee4f6852c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.monitor.os; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.Processors; @@ -19,7 +19,7 @@ import java.io.IOException; public class OsInfo implements ReportingService.Info { - private static final Version DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = 
Version.V_8_5_0; + private static final TransportVersion DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = TransportVersion.V_8_5_0; private final long refreshInterval; private final int availableProcessors; @@ -50,7 +50,7 @@ public OsInfo( public OsInfo(StreamInput in) throws IOException { this.refreshInterval = in.readLong(); this.availableProcessors = in.readInt(); - if (in.getVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { + if (in.getTransportVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { this.allocatedProcessors = Processors.readFrom(in); } else { this.allocatedProcessors = Processors.of((double) in.readInt()); @@ -65,7 +65,7 @@ public OsInfo(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeLong(refreshInterval); out.writeInt(availableProcessors); - if (out.getVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { + if (out.getTransportVersion().onOrAfter(DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT)) { allocatedProcessors.writeTo(out); } else { out.writeInt(getAllocatedProcessors()); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 1d56d2f1387fc..dab3eb100e9fb 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -276,7 +276,7 @@ public Mem(StreamInput in) throws IOException { total = 0; } this.total = total; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { long adjustedTotal = in.readLong(); assert adjustedTotal >= 0 : "expected adjusted total memory to be positive, got: " + adjustedTotal; if (adjustedTotal < 0) { @@ -299,7 +299,7 @@ public Mem(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(total); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeLong(adjustedTotal); } out.writeLong(free); diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index df8cbed44e6f9..dd56e18957318 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -45,9 +46,9 @@ public class PluginDescriptor implements Writeable, ToXContentObject { public static final String ES_PLUGIN_POLICY = "plugin-security.policy"; - private static final Version LICENSED_PLUGINS_SUPPORT = Version.V_7_11_0; - private static final Version MODULE_NAME_SUPPORT = Version.V_8_3_0; - private static final Version BOOTSTRAP_SUPPORT_REMOVED = Version.V_8_4_0; + private static final TransportVersion LICENSED_PLUGINS_SUPPORT = TransportVersion.V_7_11_0; + private static final TransportVersion MODULE_NAME_SUPPORT = TransportVersion.V_8_3_0; + private static final TransportVersion BOOTSTRAP_SUPPORT_REMOVED = TransportVersion.V_8_4_0; private final String name; private final String description; @@ -118,7 +119,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { elasticsearchVersion = Version.readVersion(in); javaVersion 
= in.readString(); this.classname = in.readString(); - if (in.getVersion().onOrAfter(MODULE_NAME_SUPPORT)) { + if (in.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { this.moduleName = in.readOptionalString(); } else { this.moduleName = null; @@ -126,8 +127,8 @@ public PluginDescriptor(final StreamInput in) throws IOException { extendedPlugins = in.readStringList(); hasNativeController = in.readBoolean(); - if (in.getVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { - if (in.getVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { + if (in.getTransportVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { + if (in.getTransportVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { in.readString(); // plugin type in.readOptionalString(); // java opts } @@ -136,7 +137,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { isLicensed = false; } - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { isModular = in.readBoolean(); isStable = in.readBoolean(); } else { @@ -153,20 +154,20 @@ public void writeTo(final StreamOutput out) throws IOException { Version.writeVersion(elasticsearchVersion, out); out.writeString(javaVersion); out.writeString(classname); - if (out.getVersion().onOrAfter(MODULE_NAME_SUPPORT)) { + if (out.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { out.writeOptionalString(moduleName); } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); - if (out.getVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { - if (out.getVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { + if (out.getTransportVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { + if (out.getTransportVersion().before(BOOTSTRAP_SUPPORT_REMOVED)) { out.writeString("ISOLATED"); out.writeOptionalString(null); } out.writeBoolean(isLicensed); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeBoolean(isModular); 
out.writeBoolean(isStable); } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java index bf6c077b4de4a..f58f14bcd7a77 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -40,7 +40,7 @@ public PluginRuntimeInfo(StreamInput in) throws IOException { } private static Boolean readIsOfficial(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { return in.readBoolean(); } else { return null; @@ -48,7 +48,7 @@ private static Boolean readIsOfficial(StreamInput in) throws IOException { } private static PluginApiInfo readApiInfo(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { return in.readOptionalWriteable(PluginApiInfo::new); } else { return null; @@ -72,7 +72,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { descriptor.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeBoolean(isOfficial); out.writeOptionalWriteable(pluginApiInfo); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java index acde189431c44..6e5eba4834ac4 100644 --- 
a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -45,10 +45,10 @@ public ScriptContextStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); compilationLimitTriggered = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); - } else if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { compilationsHistory = new TimeSeries(in).withTotal(compilations); cacheEvictionsHistory = new TimeSeries(in).withTotal(cacheEvictions); } else { @@ -63,7 +63,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); out.writeVLong(compilationLimitTriggered); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptException.java b/server/src/main/java/org/elasticsearch/script/ScriptException.java index 05a291b6562bc..b066ddb24d1ee 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptException.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptException.java @@ -9,7 +9,7 @@ package org.elasticsearch.script; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,7 +78,7 @@ public ScriptException(StreamInput in) throws IOException { scriptStack = Arrays.asList(in.readStringArray()); script = in.readString(); lang = in.readString(); - if (in.getVersion().onOrAfter(Version.V_7_7_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) && in.readBoolean()) { pos = new Position(in); } else { pos = null; @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(scriptStack.toArray(new String[0])); out.writeString(script); out.writeString(lang); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { if (pos == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/script/ScriptStats.java b/server/src/main/java/org/elasticsearch/script/ScriptStats.java index 0b161ff7bca87..76f512d627817 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -76,7 +76,7 @@ public ScriptStats(ScriptContextStats context) { } public ScriptStats(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); compilations = compilationsHistory.total; @@ -93,7 +93,7 @@ public ScriptStats(StreamInput in) throws IOException { 
@Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } else { diff --git a/server/src/main/java/org/elasticsearch/script/TimeSeries.java b/server/src/main/java/org/elasticsearch/script/TimeSeries.java index 8399a65a57e08..0311d3322e456 100644 --- a/server/src/main/java/org/elasticsearch/script/TimeSeries.java +++ b/server/src/main/java/org/elasticsearch/script/TimeSeries.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -49,7 +49,7 @@ public TimeSeries(StreamInput in) throws IOException { fiveMinutes = in.readVLong(); fifteenMinutes = in.readVLong(); twentyFourHours = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { total = in.readVLong(); } else { total = 0; @@ -70,7 +70,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(fiveMinutes); out.writeVLong(fifteenMinutes); out.writeVLong(twentyFourHours); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeVLong(total); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java index 5d6fda57b19f0..122a25a613917 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java @@ -12,7 +12,7 @@ import org.apache.logging.log4j.Logger; import 
org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResultDeduplicator; import org.elasticsearch.action.StepListener; @@ -294,7 +294,7 @@ private BanParentTaskRequest(StreamInput in) throws IOException { parentTaskId = TaskId.readFromStream(in); ban = in.readBoolean(); reason = ban ? in.readString() : null; - if (in.getVersion().onOrAfter(Version.V_7_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { waitForCompletion = in.readBoolean(); } else { waitForCompletion = false; @@ -309,7 +309,7 @@ public void writeTo(StreamOutput out) throws IOException { if (ban) { out.writeString(reason); } - if (out.getVersion().onOrAfter(Version.V_7_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { out.writeBoolean(waitForCompletion); } } diff --git a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java index 8f6f0fd477b1a..444a34e4bcc3e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.NetworkAddress; @@ -24,7 +24,7 @@ public class ActionTransportException extends TransportException { public ActionTransportException(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_1_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) { 
in.readOptionalWriteable(TransportAddress::new); in.readOptionalString(); } @@ -45,7 +45,7 @@ public ActionTransportException(String name, InetSocketAddress address, String a @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_1_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) { out.writeMissingWriteable(TransportAddress.class); out.writeMissingString(); // action } diff --git a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java index 1f05adbac0336..51aca540f7aa6 100644 --- a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -24,20 +24,20 @@ public class BytesTransportRequest extends TransportRequest implements RefCounted { final ReleasableBytesReference bytes; - private final Version version; + private final TransportVersion version; public BytesTransportRequest(StreamInput in) throws IOException { super(in); bytes = in.readReleasableBytesReference(); - version = in.getVersion(); + version = in.getTransportVersion(); } - public BytesTransportRequest(ReleasableBytesReference bytes, Version version) { + public BytesTransportRequest(ReleasableBytesReference bytes, TransportVersion version) { this.bytes = bytes; this.version = version; } - public Version version() { + public TransportVersion version() { return this.version; } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java 
b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index eddd6d6f108ba..e6e566e26b03b 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,7 +35,7 @@ public ConnectTransportException(DiscoveryNode node, String msg, String action, public ConnectTransportException(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_1_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) { in.readOptionalWriteable(DiscoveryNode::new); } } @@ -43,7 +43,7 @@ public ConnectTransportException(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_1_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) { out.writeMissingWriteable(DiscoveryNode.class); } } diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index d49e760ab85ab..a410f0e912f8e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; @@ -316,7 +317,7 @@ public ProxyModeInfo(String address, String serverName, int maxSocketConnections private 
ProxyModeInfo(StreamInput input) throws IOException { address = input.readString(); - if (input.getVersion().onOrAfter(Version.V_7_7_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { serverName = input.readString(); } else { serverName = null; @@ -337,7 +338,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(address); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { out.writeString(serverName); } out.writeVInt(maxSocketConnections); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index fbbd2483e6939..b21455829fb05 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -40,7 +40,7 @@ public RemoteConnectionInfo(String clusterAlias, ModeInfo modeInfo, TimeValue in } public RemoteConnectionInfo(StreamInput input) throws IOException { - if (input.getVersion().onOrAfter(Version.V_7_6_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { RemoteConnectionStrategy.ConnectionStrategy mode = input.readEnum(RemoteConnectionStrategy.ConnectionStrategy.class); modeInfo = mode.getReader().read(input); initialConnectionTimeout = input.readTimeValue(); @@ -79,7 +79,7 @@ public boolean isSkipUnavailable() { @Override public void writeTo(StreamOutput out) throws IOException { - if 
(out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeEnum(modeInfo.modeType()); modeInfo.writeTo(out); out.writeTimeValue(initialConnectionTimeout); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index fe0f1742f9eb6..694fb0628d145 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +60,7 @@ public TransportStats(StreamInput in) throws IOException { rxSize = in.readVLong(); txCount = in.readVLong(); txSize = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) && in.readBoolean()) { inboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { inboundHandlingTimeBucketFrequencies[i] = in.readVLong(); @@ -83,7 +84,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(rxSize); out.writeVLong(txCount); out.writeVLong(txSize); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { assert (inboundHandlingTimeBucketFrequencies.length > 0) == (outboundHandlingTimeBucketFrequencies.length > 0); out.writeBoolean(inboundHandlingTimeBucketFrequencies.length > 0); for (long handlingTimeBucketFrequency : inboundHandlingTimeBucketFrequencies) { diff --git a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java 
b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java index 5d87e3b7a4961..5b3fe6e904f13 100644 --- a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java +++ b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -16,7 +16,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomCompatibleVersion; import static org.hamcrest.CoreMatchers.equalTo; public class OriginalIndicesTests extends ESTestCase { @@ -34,18 +34,18 @@ public void testOriginalIndicesSerialization() throws IOException { OriginalIndices originalIndices = randomOriginalIndices(); BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomCompatibleVersion(random(), Version.CURRENT)); + out.setTransportVersion(randomCompatibleVersion(random(), TransportVersion.CURRENT)); OriginalIndices.writeOriginalIndices(originalIndices, out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); OriginalIndices originalIndices2 = OriginalIndices.readOriginalIndices(in); assertThat(originalIndices2.indices(), equalTo(originalIndices.indices())); // indices options are not equivalent when sent to an older version and re-read due // to the addition of hidden indices as expand to hidden indices is always true when // read from a prior version - if (out.getVersion().onOrAfter(Version.V_7_7_0) || originalIndices.indicesOptions().expandWildcardsHidden()) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) || 
originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); } else if (originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java index e5ecc8722146c..cf7bda6fc4c0b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestTests.java @@ -14,7 +14,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; public class GetStoredScriptRequestTests extends ESTestCase { @@ -22,11 +22,11 @@ public void testGetIndexedScriptRequestSerialization() throws IOException { GetStoredScriptRequest request = new GetStoredScriptRequest("id"); BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersion(random())); + out.setTransportVersion(randomVersion(random())); request.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); GetStoredScriptRequest request2 = new GetStoredScriptRequest(in); assertThat(request2.id(), equalTo(request.id())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java index 9b751702dfd6c..cf3fd1efb37f8 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.resolve; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; @@ -54,8 +55,10 @@ public void testCCSCompatibilityCheck() throws Exception { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.CURRENT)) { - throw new IllegalArgumentException("This request isn't serializable to nodes before " + Version.CURRENT); + if (out.getTransportVersion().before(TransportVersion.CURRENT)) { + throw new IllegalArgumentException( + "This request isn't serializable before transport version " + TransportVersion.CURRENT + ); } } }; @@ -81,7 +84,10 @@ public void writeTo(StreamOutput out) throws IOException { assertThat(ex.getMessage(), containsString("not compatible with version")); assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); - assertEquals("This request isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + assertEquals( + "This request isn't serializable before transport version " + TransportVersion.CURRENT, + ex.getCause().getMessage() + ); } finally { assertTrue(ESTestCase.terminate(threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java index 1e5f9762b1b5e..a390924b2e20b 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; @@ -56,8 +57,10 @@ public void testCCSCompatibilityCheck() throws Exception { fieldCapsRequest.indexFilter(new DummyQueryBuilder() { @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.CURRENT)) { - throw new IllegalArgumentException("This query isn't serializable to nodes before " + Version.CURRENT); + if (out.getTransportVersion().before(TransportVersion.CURRENT)) { + throw new IllegalArgumentException( + "This query isn't serializable before transport version " + TransportVersion.CURRENT + ); } } }); @@ -88,7 +91,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { containsString("[class org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest] is not compatible with version") ); assertThat(ex.getMessage(), containsString("and the 'search.check_ccs_compatibility' setting is enabled.")); - assertEquals("This query isn't serializable to nodes before " + Version.CURRENT, ex.getCause().getMessage()); + assertEquals("This query isn't serializable before transport version " + TransportVersion.CURRENT, ex.getCause().getMessage()); } finally { assertTrue(ESTestCase.terminate(threadPool)); } diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java index 2546e4ef2a0ec..879e4c73dd3cb 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import 
org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,7 +18,7 @@ import java.io.IOException; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.equalTo; public class MultiGetShardRequestTests extends ESTestCase { @@ -26,15 +26,15 @@ public void testSerialization() throws IOException { MultiGetShardRequest multiGetShardRequest = createTestInstance(randomBoolean()); BytesStreamOutput out = new BytesStreamOutput(); - Version minVersion = Version.CURRENT.minimumCompatibilityVersion(); + TransportVersion minVersion = TransportVersion.CURRENT.minimumCompatibilityVersion(); if (multiGetShardRequest.isForceSyntheticSource()) { - minVersion = Version.V_8_4_0; + minVersion = TransportVersion.V_8_4_0; } - out.setVersion(randomVersionBetween(random(), minVersion, Version.CURRENT)); + out.setTransportVersion(randomVersionBetween(random(), minVersion, TransportVersion.CURRENT)); multiGetShardRequest.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); MultiGetShardRequest multiGetShardRequest2 = new MultiGetShardRequest(in); assertThat(multiGetShardRequest2.index(), equalTo(multiGetShardRequest.index())); assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference())); @@ -58,7 +58,7 @@ public void testSerialization() throws IOException { public void testForceSyntheticUnsupported() { MultiGetShardRequest request = createTestInstance(true); StreamOutput out = new BytesStreamOutput(); - out.setVersion(Version.V_8_3_0); + out.setTransportVersion(TransportVersion.V_8_3_0); Exception e = expectThrows(IllegalArgumentException.class, 
() -> request.writeTo(out)); assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java index ccd34e74c610f..a4b6b1f7fe871 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -137,7 +137,7 @@ private static boolean isDiff(BytesTransportRequest request, DiscoveryNode node) if (compressor != null) { in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); } - in.setVersion(node.getVersion()); + in.setTransportVersion(node.getVersion().transportVersion); return in.readBoolean() == false; } finally { IOUtils.close(in); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java index 5451da0c22351..826edafbae354 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.settings.Settings; @@ -201,8 +202,8 @@ public void testDesiredNodeIsCompatible() { ByteSizeValue.ofGb(1), Version.CURRENT ); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(false))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(false))); + 
assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } { @@ -213,14 +214,14 @@ public void testDesiredNodeIsCompatible() { ByteSizeValue.ofGb(1), Version.CURRENT ); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(false))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(false))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } { final var desiredNode = new DesiredNode(settings, 2.0f, ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(1), Version.CURRENT); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_2_0), is(equalTo(true))); - assertThat(desiredNode.isCompatibleWithVersion(Version.V_8_3_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_2_0), is(equalTo(true))); + assertThat(desiredNode.isCompatibleWithVersion(TransportVersion.V_8_3_0), is(equalTo(true))); } } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java index 5fd585cdb35b0..078a09bf44fe1 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java @@ -8,9 +8,9 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.TransportVersionUtils; import java.io.IOException; @@ -87,19 +87,19 @@ public int hashCode() { } private static class SneakOtherSideVersionOnWire implements Writeable { - private final Version version; + private final TransportVersion version; 
SneakOtherSideVersionOnWire() { - version = Version.CURRENT; + version = TransportVersion.CURRENT; } SneakOtherSideVersionOnWire(StreamInput in) throws IOException { - version = Version.readVersion(in); + version = TransportVersion.readVersion(in); } @Override public void writeTo(StreamOutput out) throws IOException { - Version.writeVersion(out.getVersion(), out); + TransportVersion.writeVersion(out.getTransportVersion(), out); } } @@ -144,7 +144,7 @@ public void testRoundTripFromDelayedFromOldVersionWithNamedWriteable() throws IO } public void testSerializesWithRemoteVersion() throws IOException { - Version remoteVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + TransportVersion remoteVersion = TransportVersionUtils.randomCompatibleVersion(random(), TransportVersion.CURRENT); DelayableWriteable original = DelayableWriteable.referencing(new SneakOtherSideVersionOnWire()); assertThat(roundTrip(original, SneakOtherSideVersionOnWire::new, remoteVersion).expand().version, equalTo(remoteVersion)); } @@ -157,14 +157,14 @@ public void testAsSerializedIsNoopOnSerialized() throws IOException { } private void roundTripTestCase(DelayableWriteable original, Writeable.Reader reader) throws IOException { - DelayableWriteable roundTripped = roundTrip(original, reader, Version.CURRENT); + DelayableWriteable roundTripped = roundTrip(original, reader, TransportVersion.CURRENT); assertThat(roundTripped.expand(), equalTo(original.expand())); } private DelayableWriteable roundTrip( DelayableWriteable original, Writeable.Reader reader, - Version version + TransportVersion version ) throws IOException { DelayableWriteable delayed = copyInstance( original, @@ -192,7 +192,10 @@ protected NamedWriteableRegistry writableRegistry() { return new NamedWriteableRegistry(singletonList(new NamedWriteableRegistry.Entry(Example.class, "example", Example::new))); } - private static Version randomOldVersion() { - return randomValueOtherThanMany(Version.CURRENT::before, () -> 
VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); + private static TransportVersion randomOldVersion() { + return randomValueOtherThanMany( + TransportVersion.CURRENT::before, + () -> TransportVersionUtils.randomCompatibleVersion(random(), TransportVersion.CURRENT) + ); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 04ebb70ec328c..244a66338ce94 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.client.internal.transport.NoNodeAvailableException; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -82,7 +82,7 @@ private List randomSearchFailures() { private void assertResponseEquals(BulkByScrollResponse expected, BulkByScrollResponse actual) { assertEquals(expected.getTook(), actual.getTook()); - BulkByScrollTaskStatusTests.assertTaskStatusEquals(Version.CURRENT, expected.getStatus(), actual.getStatus()); + BulkByScrollTaskStatusTests.assertTaskStatusEquals(TransportVersion.CURRENT, expected.getStatus(), actual.getStatus()); assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); for (int i = 0; i < expected.getBulkFailures().size(); i++) { Failure expectedFailure = expected.getBulkFailures().get(i); diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index b341e8c3503c5..1d2ec8ae082e8 100644 --- 
a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -10,7 +10,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -44,14 +44,14 @@ public void testBulkByTaskStatus() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); status.writeTo(out); BulkByScrollTask.Status tripped = new BulkByScrollTask.Status(out.bytes().streamInput()); - assertTaskStatusEquals(out.getVersion(), status, tripped); + assertTaskStatusEquals(out.getTransportVersion(), status, tripped); } /** * Assert that two task statuses are equal after serialization. * @param version the version at which expected was serialized */ - public static void assertTaskStatusEquals(Version version, BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { + public static void assertTaskStatusEquals(TransportVersion version, BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { assertEquals(expected.getTotal(), actual.getTotal()); assertEquals(expected.getUpdated(), actual.getUpdated()); assertEquals(expected.getCreated(), actual.getCreated()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java index 8a91c9b0d2456..91380980a58a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.license; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -48,14 +48,14 @@ public FeatureUsageInfo( } public FeatureUsageInfo(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { this.family = in.readOptionalString(); } else { this.family = null; } this.name = in.readString(); this.lastUsedTime = ZonedDateTime.ofInstant(Instant.ofEpochSecond(in.readLong()), ZoneOffset.UTC); - if (in.getVersion().onOrAfter(Version.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { this.context = in.readOptionalString(); } else { this.context = null; @@ -65,12 +65,12 @@ public FeatureUsageInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeOptionalString(this.family); } out.writeString(name); out.writeLong(lastUsedTime.toEpochSecond()); - if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { out.writeOptionalString(this.context); } out.writeString(licenseLevel); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java index 9889a88c91382..dca4352690bd6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,7 +57,7 @@ public XPackInfoRequest(StreamInput in) throws IOException { categories.add(Category.valueOf(in.readString())); } this.categories = categories; - if (hasLicenseVersionField(in.getVersion())) { + if (hasLicenseVersionField(in.getTransportVersion())) { int ignoredLicenseVersion = in.readVInt(); } } @@ -91,12 +91,12 @@ public void writeTo(StreamOutput out) throws IOException { for (Category category : categories) { out.writeString(category.name()); } - if (hasLicenseVersionField(out.getVersion())) { + if (hasLicenseVersionField(out.getTransportVersion())) { out.writeVInt(License.VERSION_CURRENT); } } - private static boolean hasLicenseVersionField(Version streamVersion) { - return streamVersion.onOrAfter(Version.V_7_8_1) && streamVersion.before(Version.V_8_0_0); + private static boolean hasLicenseVersionField(TransportVersion streamVersion) { + return streamVersion.onOrAfter(TransportVersion.V_7_8_1) && streamVersion.before(TransportVersion.V_8_0_0); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 05fffeca9cc2b..820181c5c3218 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -362,7 +362,7 @@ public FeatureSet(String name, boolean available, boolean enabled) { public FeatureSet(StreamInput in) throws IOException { 
this(in.readString(), readAvailable(in), in.readBoolean()); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readMap(); // backcompat reading native code info, but no longer used here } } @@ -370,7 +370,7 @@ public FeatureSet(StreamInput in) throws IOException { // this is separated out so that the removed description can be read from the stream on construction // TODO: remove this for 8.0 private static boolean readAvailable(StreamInput in) throws IOException { - if (in.getVersion().before(Version.V_7_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_7_3_0)) { in.readOptionalString(); } return in.readBoolean(); @@ -379,12 +379,12 @@ private static boolean readAvailable(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().before(Version.V_7_3_0)) { + if (out.getTransportVersion().before(TransportVersion.V_7_3_0)) { out.writeOptionalString(null); } out.writeBoolean(available); out.writeBoolean(enabled); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeGenericMap(Collections.emptyMap()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index a2f73fcab266a..94604dbd8227c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.protocol.xpack.graph; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.IndicesRequest; @@ -111,7 +111,7 @@ public GraphExploreRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String[] types = in.readStringArray(); assert types.length == 0; } @@ -180,7 +180,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } out.writeOptionalString(routing); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index 7ff06b3fe9049..879db231a99e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -353,7 +352,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(repositoryName); out.writeOptionalString(snapshotName); out.writeOptionalString(shrinkIndexName); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeOptionalLong(indexCreationDate); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java index d160341b5f38d..59b614fc805be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -457,10 +456,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(setPriorityPriority); out.writeOptionalWriteable(shrinkMaxPrimaryShardSize); out.writeOptionalVInt(shrinkNumberOfShards); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeOptionalVLong(rolloverMaxPrimaryShardDocs); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeOptionalTimeValue(rolloverMinAge); out.writeOptionalVLong(rolloverMinDocs); out.writeOptionalWriteable(rolloverMinPrimaryShardSize); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java index 4d74afbcccd99..960f2ae58153a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -188,10 +187,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(maxPrimaryShardSize); out.writeOptionalTimeValue(maxAge); out.writeOptionalVLong(maxDocs); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeOptionalVLong(maxPrimaryShardDocs); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeOptionalWriteable(minSize); out.writeOptionalWriteable(minPrimaryShardSize); out.writeOptionalTimeValue(minAge); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java index 394f3cbe47d17..1bdc35962f7fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -206,7 +205,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(searchTotal); out.writeVLong(indexFailures); out.writeVLong(searchFailures); - if (out.getVersion().onOrAfter(Version.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { out.writeVLong(processingTime); out.writeVLong(processingTotal); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index aff6188a34c56..b36a6a447b6b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.transform.transforms.pivot; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -228,7 +228,7 @@ public DateHistogramGroupSource(StreamInput in) throws IOException { super(in); this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); - if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { this.offset = in.readLong(); } else { this.offset = 0; @@ -331,7 +331,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeInterval(interval, out); out.writeOptionalZoneId(timeZone); - if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { out.writeLong(offset); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index 67a25ecdb1c2c..37fe08b302e7d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -237,7 +237,7 @@ public void testSerialization() throws IOException { 
originalRequest.writeTo(out); final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); + in.setTransportVersion(out.getTransportVersion()); final MonitoringBulkRequest deserializedRequest = new MonitoringBulkRequest(in); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java index 5109166a22c48..65acc3b7e2f14 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRequest; @@ -676,7 +676,7 @@ public static class Request extends ActionRequest implements TaskAwareRequest { earlyReadNodeCount = in.readVInt(); readEarly = in.readBoolean(); writeAndOverwrite = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { abortWrite = in.readBoolean(); } else { abortWrite = false; @@ -696,10 +696,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(earlyReadNodeCount); out.writeBoolean(readEarly); out.writeBoolean(writeAndOverwrite); - if (out.getVersion().onOrAfter(Version.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { out.writeBoolean(abortWrite); } else if (abortWrite) { - throw new IllegalStateException("cannot send abortWrite request to node of version [" + 
out.getVersion() + "]"); + throw new IllegalStateException("cannot send abortWrite request on transport version [" + out.getTransportVersion() + "]"); } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index 43030941e8028..ec79367f2b57c 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRequest; @@ -673,7 +673,7 @@ public Request(StreamInput in) throws IOException { maxTotalDataSize = ByteSizeValue.readFrom(in); detailed = in.readBoolean(); reroutedFrom = in.readOptionalWriteable(DiscoveryNode::new); - if (in.getVersion().onOrAfter(Version.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { abortWritePermitted = in.readBoolean(); } else { abortWritePermitted = false; @@ -700,10 +700,12 @@ public void writeTo(StreamOutput out) throws IOException { maxTotalDataSize.writeTo(out); out.writeBoolean(detailed); out.writeOptionalWriteable(reroutedFrom); - if (out.getVersion().onOrAfter(Version.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { out.writeBoolean(abortWritePermitted); } else if (abortWritePermitted) { - throw new IllegalStateException("cannot send abortWritePermitted request to node of 
version [" + out.getVersion() + "]"); + throw new IllegalStateException( + "cannot send abortWritePermitted request on transport version [" + out.getTransportVersion() + "]" + ); } } From 8aa40545bd75fd6cd3d357803c11b7cd6cbcd4e3 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 31 Jan 2023 13:29:03 +0100 Subject: [PATCH 32/63] Update rollup dependencies (#93369) Change ilm and data streams dependencies to be test dependencies. --- x-pack/plugin/rollup/build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index e2da0ba0f866e..0e97e4f555e79 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -10,8 +10,8 @@ archivesBaseName = 'x-pack-rollup' dependencies { compileOnly project(path: xpackModule('core')) - compileOnly project(':modules:data-streams') - compileOnly project(path: xpackModule('ilm')) + testImplementation project(':modules:data-streams') + testImplementation project(path: xpackModule('ilm')) compileOnly project(path: xpackModule('mapper-aggregate-metric')) testImplementation(testArtifact(project(xpackModule('core')))) } From 93544797f35ae9d7c8a34432ef0e21e69ae587d1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 31 Jan 2023 07:34:30 -0600 Subject: [PATCH 33/63] Avoiding race conditions in GeoIpDownloaderIT (#93363) --- .../org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index f3f37f50147fb..5d02fde827160 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ 
b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -254,6 +254,7 @@ public void testGeoIpDatabasesDownload() throws Exception { assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 }, 2, TimeUnit.MINUTES); for (String id : List.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb")) { @@ -309,7 +310,11 @@ public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { .setPersistentSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)) .get(); assertTrue(settingsResponse.isAcknowledged()); - assertBusy(() -> { assertNull(getTask().getState()); }); + assertBusy(() -> { + assertNotNull(getTask()); + assertNull(getTask().getState()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 + }); putNonGeoipPipeline(pipelineId); assertBusy(() -> { assertNull(getTask().getState()); }); putNonGeoipPipeline(pipelineId); From e68c2586b5cfe91e35555e4db277335e402b600d Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 31 Jan 2023 14:42:07 +0100 Subject: [PATCH 34/63] Set forced_refresh to true when using stateless refresh work-around (#93383) In #93160, we never set the forced_refresh flag in the response. With this change, the bulk response now correctly reflects what happened. It also unblocks a bunch of YAML tests for Stateless. 
Relates ES-5292 --- .../action/bulk/TransportBulkAction.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 66b365f6a092e..ba4bf8c343cb4 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -192,11 +192,20 @@ public static ActionListe @Override protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener outerListener) { - // As a work-around to support `?refresh`, explicitly replace the refresh policy with a call to the Refresh API. + // As a work-around to support `?refresh`, explicitly replace the refresh policy with a call to the Refresh API, + // and always set forced_refresh to true. // TODO: Replace with a less hacky approach. ActionListener listener = outerListener; if (DiscoveryNode.isStateless(clusterService.getSettings()) && bulkRequest.getRefreshPolicy() != WriteRequest.RefreshPolicy.NONE) { - listener = outerListener.delegateFailure((l, r) -> { client.admin().indices().prepareRefresh().execute(l.map(ignored -> r)); }); + listener = outerListener.delegateFailure((l, r) -> client.admin().indices().prepareRefresh().execute(l.map(ignored -> { + for (BulkItemResponse response : r.getItems()) { + DocWriteResponse docWriteResponse = response.getResponse(); + if (docWriteResponse != null) { + docWriteResponse.setForcedRefresh(true); + } + } + return r; + }))); bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); } /* From 845178b697e9147764b896324e6e32778ab39992 Mon Sep 17 00:00:00 2001 From: Pablo Alcantar Morales Date: Tue, 31 Jan 2023 14:52:29 +0100 Subject: [PATCH 35/63] Cache the creation of parsers within `DateProcessor` (#92880) cache potentially duped values in the `DateProcessor`, avoiding the creation of disposable 
objects during the different executions --- docs/changelog/92880.yaml | 5 ++ .../ingest/common/DateFormat.java | 11 +++- .../ingest/common/DateProcessor.java | 61 ++++++++++++++++++- .../ingest/common/DateProcessorTests.java | 33 ++++++++++ 4 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/92880.yaml diff --git a/docs/changelog/92880.yaml b/docs/changelog/92880.yaml new file mode 100644 index 0000000000000..5336987ee2cde --- /dev/null +++ b/docs/changelog/92880.yaml @@ -0,0 +1,5 @@ +pr: 92880 +summary: Cache the creation of parsers within DateProcessor +area: Ingest Node +type: enhancement +issues: [] diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 4a9fa93662bde..84c4315a69017 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -36,11 +36,10 @@ enum DateFormat { @Override Function getFunction(String format, ZoneId timezone, Locale locale) { return (date) -> { - TemporalAccessor accessor = DateFormatter.forPattern("iso8601").parse(date); + TemporalAccessor accessor = ISO_8601.parse(date); // even though locale could be set to en-us, Locale.ROOT (following iso8601 calendar data rules) should be used return DateFormatters.from(accessor, Locale.ROOT, timezone).withZoneSameInstant(timezone); }; - } }, Unix { @@ -115,6 +114,14 @@ Function getFunction(String format, ZoneId zoneId, Locale } }; + /** It's important to keep this variable as a constant because {@link DateFormatter#forPattern(String)} is an expensive method and, + * in this case, it's a never changing value. + *
+ * Also, we shouldn't inline it in the {@link DateFormat#Iso8601}'s enum because it'd make useless the cache used + * at {@link DateProcessor}). + */ + private static final DateFormatter ISO_8601 = DateFormatter.forPattern("iso8601"); + abstract Function getFunction(String format, ZoneId timezone, Locale locale); static DateFormat fromString(String format) { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index 71a73f605a4c2..8ef870f773779 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -9,8 +9,10 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.LocaleUtils; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -19,6 +21,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; +import java.lang.ref.SoftReference; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -26,7 +29,9 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.ConcurrentMap; import java.util.function.Function; +import java.util.function.Supplier; public final class DateProcessor extends AbstractProcessor { @@ -72,9 +77,17 @@ public final class DateProcessor extends AbstractProcessor { this.targetField = targetField; this.formats = formats; this.dateParsers = new ArrayList<>(this.formats.size()); + for (String format : formats) { DateFormat 
dateFormat = DateFormat.fromString(format); - dateParsers.add((params) -> dateFormat.getFunction(format, newDateTimeZone(params), newLocale(params))); + dateParsers.add((params) -> { + var documentZoneId = newDateTimeZone(params); + var documentLocale = newLocale(params); + return Cache.INSTANCE.getOrCompute( + new Cache.Key(format, documentZoneId, documentLocale), + () -> dateFormat.getFunction(format, documentZoneId, documentLocale) + ); + }); } this.outputFormat = outputFormat; formatter = DateFormatter.forPattern(this.outputFormat); @@ -198,4 +211,50 @@ public DateProcessor create( ); } } + + /** + * An ad-hoc cache class that just throws away the cached values once it's full because we don't want to affect the performance + * while applying eviction policies when adding new values or retrieving them. + */ + static final class Cache { + + private static final String CACHE_CAPACITY_SETTING = "es.ingest.date_processor.cache_capacity"; + static final Cache INSTANCE; + + static { + var cacheSizeStr = System.getProperty(CACHE_CAPACITY_SETTING, "256"); + try { + INSTANCE = new Cache(Integer.parseInt(cacheSizeStr)); + } catch (NumberFormatException e) { + throw new SettingsException("{} must be a valid number but was [{}]", CACHE_CAPACITY_SETTING, cacheSizeStr); + } + } + private final ConcurrentMap>> map; + private final int capacity; + + Cache(int capacity) { + if (capacity <= 0) { + throw new IllegalArgumentException("cache capacity must be a value greater than 0 but was " + capacity); + } + this.capacity = capacity; + this.map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(this.capacity); + } + + Function getOrCompute(Key key, Supplier> supplier) { + Function fn; + var element = map.get(key); + // element exist and wasn't GCed + if (element != null && (fn = element.get()) != null) { + return fn; + } + if (map.size() >= capacity) { + map.clear(); + } + fn = supplier.get(); + map.put(key, new SoftReference<>(fn)); + return fn; + } + + record 
Key(String format, ZoneId zoneId, Locale locale) {} + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index fcf1e9a301ebb..18454c866cb28 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -24,9 +24,15 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DateProcessorTests extends ESTestCase { @@ -335,4 +341,31 @@ public void testOutputFormat() { String expectedDate = "00:00:00." 
+ Strings.format("%09d", nanosAfterEpoch); assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo(expectedDate)); } + + @SuppressWarnings("unchecked") + public void testCacheIsEvictedAfterReachMaxCapacity() { + Supplier> supplier1 = mock(Supplier.class); + Supplier> supplier2 = mock(Supplier.class); + Function zonedDateTimeFunction1 = str -> ZonedDateTime.now(); + Function zonedDateTimeFunction2 = str -> ZonedDateTime.now(); + var cache = new DateProcessor.Cache(1); + var key1 = new DateProcessor.Cache.Key("format-1", ZoneId.systemDefault(), Locale.ROOT); + var key2 = new DateProcessor.Cache.Key("format-2", ZoneId.systemDefault(), Locale.ROOT); + + when(supplier1.get()).thenReturn(zonedDateTimeFunction1); + when(supplier2.get()).thenReturn(zonedDateTimeFunction2); + + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 call to supplier1 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 call to supplier2 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more call to supplier1 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 more call to supplier2 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more to call to supplier1 + + verify(supplier1, times(3)).get(); + verify(supplier2, times(2)).get(); + } } From 85a31872c36599b388586c17a4cb7d114eba6938 Mon Sep 17 00:00:00 2001 From: Nicolas Ruflin Date: Tue, 31 Jan 2023 16:40:29 +0100 Subject: [PATCH 36/63] Add 
`ignore_missing_component_templates` config option (#92436) This change introduces the configuration option `ignore_missing_component_templates` as discussed in https://github.com/elastic/elasticsearch/issues/92426 The implementation [option 6](https://github.com/elastic/elasticsearch/issues/92426#issuecomment-1372675683) was picked with a slight adjustment meaning no patterns are allowed. ## Implementation During the creation of an index template, the list of component templates is checked if all component templates exist. This check is extended to skip any component templates which are listed under `ignore_missing_component_templates`. An index template that skips the check for the component template `logs-foo@custom` looks as following: ``` PUT _index_template/logs-foo { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ``` The component template `logs-foo@package` has to exist before creation. It can be created with: ``` PUT _component_template/logs-foo@custom { "template": { "mappings": { "properties": { "host.ip": { "type": "ip" } } } } } ``` ## Testing For manual testing, different scenarios can be tested. To simplify testing, the commands from `.http` file are added. Before each test run, a clean cluster is expected. 
### New behaviour, missing component template With the new config option, it must be possible to create an index template with a missing component templates without getting an error: ``` ### Add logs-foo@package component template PUT http://localhost:9200/ _component_template/logs-foo@package Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.name": { "type": "keyword" } } } } } ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ### Create data stream PUT http://localhost:9200/ _data_stream/logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ### Check if mappings exist GET http://localhost:9200/ logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ``` It is checked if all templates could be created and data stream mappings are correct. ### Old behaviour, with all component templates In the following, a component template is made optional but it already exists. 
It is checked, that it will show up in the mappings: ``` ### Add logs-foo@package component template PUT http://localhost:9200/ _component_template/logs-foo@package Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.name": { "type": "keyword" } } } } } ### Add logs-foo@custom component template PUT http://localhost:9200/ _component_template/logs-foo@custom Authorization: Basic elastic password Content-Type: application/json { "template": { "mappings": { "properties": { "host.ip": { "type": "ip" } } } } } ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ### Create data stream PUT http://localhost:9200/ _data_stream/logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ### Check if mappings exist GET http://localhost:9200/ logs-foo-bar Authorization: Basic elastic password Content-Type: application/json ``` ### Check old behaviour Ensure, that the old behaviour still exists when a component template is used that is not part of `ignore_missing_component_templates`: ``` ### Add logs-foo index template PUT http://localhost:9200/ _index_template/logs-foo Authorization: Basic elastic password Content-Type: application/json { "index_patterns": ["logs-foo-*"], "data_stream": { }, "composed_of": ["logs-foo@package", "logs-foo@custom"], "ignore_missing_component_templates": ["logs-foo@custom"], "priority": 500 } ``` Co-authored-by: Lee Hinman --- docs/changelog/92436.yaml | 6 + ...gnore-missing-component-templates.asciidoc | 95 +++++++++++ .../indices/index-templates.asciidoc | 2 + .../15_composition.yml | 63 ++++++++ .../metadata/ComposableIndexTemplate.java | 58 ++++++- 
.../MetadataIndexTemplateService.java | 25 ++- .../ComposableIndexTemplateTests.java | 40 ++++- .../MetadataIndexTemplateServiceTests.java | 150 +++++++++++++++++- ...adataMigrateToDataTiersRoutingService.java | 3 + 9 files changed, 423 insertions(+), 19 deletions(-) create mode 100644 docs/changelog/92436.yaml create mode 100644 docs/reference/indices/ignore-missing-component-templates.asciidoc diff --git a/docs/changelog/92436.yaml b/docs/changelog/92436.yaml new file mode 100644 index 0000000000000..1f8b4a9bf1877 --- /dev/null +++ b/docs/changelog/92436.yaml @@ -0,0 +1,6 @@ +pr: 92436 +summary: Add `ignore_missing_component_templates` config option +area: Indices APIs +type: enhancement +issues: + - 92426 diff --git a/docs/reference/indices/ignore-missing-component-templates.asciidoc b/docs/reference/indices/ignore-missing-component-templates.asciidoc new file mode 100644 index 0000000000000..8337be779c709 --- /dev/null +++ b/docs/reference/indices/ignore-missing-component-templates.asciidoc @@ -0,0 +1,95 @@ +[[ignore_missing_component_templates]] +== Config ignore_missing_component_templates + +The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist. Every time a data stream is created based on the index template, the existence of the component template will be checked. If it exists, it will be used to form the index's composite settings. If it does not exist, it is ignored. + +=== Usage example + +In the following, one component template and an index template are created. The index template references two component templates, but only the `@package` one exists. + + +Create the component template `logs-foo_component1`.
This has to be created before the index template as it is not optional: + +[source,console] +---- +PUT _component_template/logs-foo_component1 +{ + "template": { + "mappings": { + "properties": { + "host.name": { + "type": "keyword" + } + } + } + } +} +---- + +Next, the index template will be created and it references two component templates: + +[source,JSON] +---- + "composed_of": ["logs-foo_component1", "logs-foo_component2"] +---- + +Before, only the `logs-foo_component1` component template was created, meaning the `logs-foo_component2` is missing. Because of this the following entry was added to the config: + +[source,JSON] +---- + "ignore_missing_component_templates": ["logs-foo_component2"], +---- + +During creation of the template, it will not validate that `logs-foo_component2` exists: + + +[source,console] +---- +PUT _index_template/logs-foo +{ + "index_patterns": ["logs-foo-*"], + "data_stream": { }, + "composed_of": ["logs-foo_component1", "logs-foo_component2"], + "ignore_missing_component_templates": ["logs-foo_component2"], + "priority": 500 +} +---- +// TEST[continued] + +The index template `logs-foo` was successfully created. A data stream can be created based on this template: + +[source,console] +---- +PUT _data_stream/logs-foo-bar +---- +// TEST[continued] + +Looking at the mappings of the data stream, it will contain the `host.name` field. + +At a later stage, the missing component template might be added: + +[source,console] +---- +PUT _component_template/logs-foo_component2 +{ + "template": { + "mappings": { + "properties": { + "host.ip": { + "type": "ip" + } + } + } + } +} +---- +// TEST[continued] + +This will not have an immediate effect on the data stream.
The mapping `host.ip` will only show up in the data stream mappings when the data stream is rolled over automatically next time or a manual rollover is triggered: + +[source,console] +---- +POST logs-foo-bar/_rollover +---- +// TEST[continued] +// TEST[teardown:data_stream_cleanup] diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 8a4c985970b26..6128ab48998f3 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -161,3 +161,5 @@ DELETE _component_template/component_template1 //// include::simulate-multi-component-templates.asciidoc[] + +include::ignore-missing-component-templates.asciidoc[] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 5eef78a8c63ba..2aaf492f0ff0d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -286,3 +286,66 @@ - is_false: purple-index.mappings.properties.nested.include_in_root - is_true: purple-index.mappings.properties.nested.include_in_parent + +--- +"Index template ignore_missing_component_template valid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + allowed_warnings: + - "index template [blue] has index patterns [purple-index] matching patterns from existing older templates [global] with patterns (global => [*]); this template [blue] will take precedence during new index creation" + 
indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["blue"] + + - do: + indices.create: + index: purple-index + + - do: + indices.get: + index: purple-index + + - match: {purple-index.mappings.properties.foo: {type: keyword}} + +--- +"Index template ignore_missing_component_template invalid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + catch: /index_template \[blue\] invalid, cause \[index template \[blue\] specifies a missing component templates \[blue\] that does not exist/ + indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["foo"] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 367b0f9f6f00d..377d91d60a99e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -46,6 +47,7 @@ public class ComposableIndexTemplate implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -59,7 +61,8 @@ public class ComposableIndexTemplate implements SimpleDiffable) a[5], (DataStreamTemplate) a[6], - (Boolean) a[7] + (Boolean) a[7], + (List) a[8] ) ); @@ -72,6 +75,7 @@ public class 
ComposableIndexTemplate implements SimpleDiffable p.map(), METADATA); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), DataStreamTemplate.PARSER, DATA_STREAM); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_AUTO_CREATE); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), IGNORE_MISSING_COMPONENT_TEMPLATES); } private final List indexPatterns; @@ -89,6 +93,8 @@ public class ComposableIndexTemplate implements SimpleDiffable ignoreMissingComponentTemplates; static Diff readITV2DiffFrom(StreamInput in) throws IOException { return SimpleDiffable.readDiffFrom(ComposableIndexTemplate::new, in); @@ -106,7 +112,7 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null); } public ComposableIndexTemplate( @@ -118,7 +124,7 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); } public ComposableIndexTemplate( @@ -130,6 +136,20 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate, @Nullable Boolean allowAutoCreate + ) { + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); + } + + public ComposableIndexTemplate( + List indexPatterns, + @Nullable Template template, + @Nullable List componentTemplates, + @Nullable Long priority, + @Nullable Long version, + @Nullable Map metadata, + @Nullable DataStreamTemplate dataStreamTemplate, + @Nullable Boolean allowAutoCreate, + @Nullable List 
ignoreMissingComponentTemplates ) { this.indexPatterns = indexPatterns; this.template = template; @@ -139,6 +159,7 @@ public ComposableIndexTemplate( this.metadata = metadata; this.dataStreamTemplate = dataStreamTemplate; this.allowAutoCreate = allowAutoCreate; + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; } public ComposableIndexTemplate(StreamInput in) throws IOException { @@ -154,6 +175,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); this.allowAutoCreate = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + this.ignoreMissingComponentTemplates = in.readOptionalStringList(); + } else { + this.ignoreMissingComponentTemplates = null; + } } public List indexPatterns() { @@ -204,6 +230,11 @@ public Boolean getAllowAutoCreate() { return this.allowAutoCreate; } + @Nullable + public List getIgnoreMissingComponentTemplates() { + return ignoreMissingComponentTemplates; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.indexPatterns); @@ -219,6 +250,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); out.writeOptionalBoolean(allowAutoCreate); + if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + out.writeOptionalStringCollection(ignoreMissingComponentTemplates); + } } @Override @@ -246,6 +280,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (this.allowAutoCreate != null) { builder.field(ALLOW_AUTO_CREATE.getPreferredName(), allowAutoCreate); } + if (this.ignoreMissingComponentTemplates != null) { + builder.stringListField(IGNORE_MISSING_COMPONENT_TEMPLATES.getPreferredName(), ignoreMissingComponentTemplates); + } builder.endObject(); return builder; } @@ -260,7 +297,8 @@ public int hashCode() { 
this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } @@ -280,7 +318,8 @@ && componentTemplatesEquals(this.componentTemplates, other.componentTemplates) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate) - && Objects.equals(this.allowAutoCreate, other.allowAutoCreate); + && Objects.equals(this.allowAutoCreate, other.allowAutoCreate) + && Objects.equals(this.ignoreMissingComponentTemplates, other.ignoreMissingComponentTemplates); } static boolean componentTemplatesEquals(List c1, List c2) { @@ -421,6 +460,7 @@ public static class Builder { private Map metadata; private DataStreamTemplate dataStreamTemplate; private Boolean allowAutoCreate; + private List ignoreMissingComponentTemplates; public Builder() {} @@ -464,6 +504,11 @@ public Builder allowAutoCreate(Boolean allowAutoCreate) { return this; } + public Builder ignoreMissingComponentTemplates(List ignoreMissingComponentTemplates) { + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; + return this; + } + public ComposableIndexTemplate build() { return new ComposableIndexTemplate( this.indexPatterns, @@ -473,7 +518,8 @@ public ComposableIndexTemplate build() { this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 36a432b7625e9..3dca87dcbde41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -505,17 +505,34 @@ public static void 
validateV2TemplateRequest(Metadata metadata, String name, Com } final Map componentTemplates = metadata.componentTemplates(); + final List ignoreMissingComponentTemplates = (template.getIgnoreMissingComponentTemplates() == null + ? List.of() + : template.getIgnoreMissingComponentTemplates()); final List missingComponentTemplates = template.composedOf() .stream() .filter(componentTemplate -> componentTemplates.containsKey(componentTemplate) == false) + .filter(componentTemplate -> ignoreMissingComponentTemplates.contains(componentTemplate) == false) .toList(); - if (missingComponentTemplates.size() > 0) { + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() == 0) { throw new InvalidIndexTemplateException( name, "index template [" + name + "] specifies component templates " + missingComponentTemplates + " that do not exist" ); } + + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() > 0) { + + throw new InvalidIndexTemplateException( + name, + "index template [" + + name + + "] specifies a missing component templates " + + missingComponentTemplates + + " " + + "that does not exist and is not part of 'ignore_missing_component_templates'" + ); + } } public ClusterState addIndexTemplateV2( @@ -579,7 +596,8 @@ public ClusterState addIndexTemplateV2( template.version(), template.metadata(), template.getDataStreamTemplate(), - template.getAllowAutoCreate() + template.getAllowAutoCreate(), + template.getIgnoreMissingComponentTemplates() ); } @@ -679,7 +697,8 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT indexTemplate.version(), indexTemplate.metadata(), indexTemplate.getDataStreamTemplate(), - indexTemplate.getAllowAutoCreate() + indexTemplate.getAllowAutoCreate(), + indexTemplate.getIgnoreMissingComponentTemplates() ); validate(name, templateToValidate); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index d4e7993ac0529..649355fb8b7f4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -75,6 +75,7 @@ public static ComposableIndexTemplate randomInstance() { } List indexPatterns = randomList(1, 4, () -> randomAlphaOfLength(4)); + List ignoreMissingComponentTemplates = randomList(0, 4, () -> randomAlphaOfLength(4)); return new ComposableIndexTemplate( indexPatterns, template, @@ -83,7 +84,8 @@ public static ComposableIndexTemplate randomInstance() { randomBoolean() ? null : randomNonNegativeLong(), meta, dataStreamTemplate, - randomBoolean() ? null : randomBoolean() + randomBoolean() ? null : randomBoolean(), + ignoreMissingComponentTemplates ); } @@ -149,7 +151,7 @@ protected ComposableIndexTemplate mutateInstance(ComposableIndexTemplate orig) { } public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate orig) { - switch (randomIntBetween(0, 6)) { + switch (randomIntBetween(0, 7)) { case 0: List newIndexPatterns = randomValueOtherThan( orig.indexPatterns(), @@ -177,7 +179,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 2: List newComposedOf = randomValueOtherThan(orig.composedOf(), () -> randomList(0, 10, () -> randomAlphaOfLength(5))); @@ -189,7 +192,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 3: return new ComposableIndexTemplate( @@ -200,7 +204,8 @@ public static ComposableIndexTemplate 
mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 4: return new ComposableIndexTemplate( @@ -211,7 +216,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori randomValueOtherThan(orig.version(), ESTestCase::randomNonNegativeLong), orig.metadata(), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 5: return new ComposableIndexTemplate( @@ -222,7 +228,8 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), randomValueOtherThan(orig.metadata(), ComposableIndexTemplateTests::randomMeta), orig.getDataStreamTemplate(), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() ); case 6: return new ComposableIndexTemplate( @@ -233,7 +240,24 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori orig.version(), orig.metadata(), randomValueOtherThan(orig.getDataStreamTemplate(), ComposableIndexTemplateTests::randomDataStreamTemplate), - orig.getAllowAutoCreate() + orig.getAllowAutoCreate(), + orig.getIgnoreMissingComponentTemplates() + ); + case 7: + List ignoreMissingComponentTemplates = randomValueOtherThan( + orig.getIgnoreMissingComponentTemplates(), + () -> randomList(1, 4, () -> randomAlphaOfLength(4)) + ); + return new ComposableIndexTemplate( + orig.indexPatterns(), + orig.template(), + orig.composedOf(), + orig.priority(), + orig.version(), + orig.metadata(), + orig.getDataStreamTemplate(), + orig.getAllowAutoCreate(), + ignoreMissingComponentTemplates ); default: throw new IllegalStateException("illegal randomization branch"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 9702810e3afc6..e8ad6d75736b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -597,6 +597,7 @@ public void testRemoveIndexTemplateV2Wildcards() throws Exception { ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "foo", template); assertThat(state.metadata().templatesV2().get("foo"), notNullValue()); + assertTemplatesEqual(state.metadata().templatesV2().get("foo"), template); Exception e = expectThrows( @@ -1529,7 +1530,12 @@ public void testAddInvalidTemplate() throws Exception { }); assertThat(e.name(), equalTo("template")); - assertThat(e.getMessage(), containsString("index template [template] specifies " + "component templates [bad] that do not exist")); + assertThat( + e.getMessage(), + containsString( + "index_template [template] invalid, cause [index template [template] specifies component templates [bad] that do not exist]" + ) + ); } public void testRemoveComponentTemplate() throws Exception { @@ -2108,6 +2114,146 @@ public void testV2TemplateOverlaps() throws Exception { } } + /** + * Tests to add two component templates but ignores both with is valid + * + * @throws Exception + */ + public void testIgnoreMissingComponentTemplateValid() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("bar"); + + // Order of params is mixed up on purpose + List ignoreMissingComponentTemplates = new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-test-*"), + null, + componentTemplates, + 1L, 
+ null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + + ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); + MetadataIndexTemplateService.validateV2TemplateRequest(state.metadata(), indexTemplateName, template); + } + + public void testIgnoreMissingComponentTemplateInvalid() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("fail"); + + List ignoreMissingComponentTemplates = new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-foo-*"), + null, + componentTemplates, + 1L, + null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + ClusterState state = metadataIndexTemplateService.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, indexTemplateName, template); + + // try now the same thing with validation on + InvalidIndexTemplateException e = expectThrows( + InvalidIndexTemplateException.class, + () -> MetadataIndexTemplateService.validateV2TemplateRequest(state.metadata(), indexTemplateName, template) + + ); + assertThat(e.getMessage(), containsString("specifies a missing component templates [fail] that does not exist")); + } + + /** + * This is a similar test as above but with running the service + * @throws Exception + */ + public void testAddInvalidTemplateIgnoreService() throws Exception { + + String indexTemplateName = "metric-test"; + + List componentTemplates = new ArrayList<>(); + componentTemplates.add("foo"); + componentTemplates.add("fail"); + + List ignoreMissingComponentTemplates 
= new ArrayList<>(); + ignoreMissingComponentTemplates.add("bar"); + ignoreMissingComponentTemplates.add("foo"); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Arrays.asList("metrics-foo-*"), + null, + componentTemplates, + 1L, + null, + null, + null, + null, + ignoreMissingComponentTemplates + ); + + ComponentTemplate ct = new ComponentTemplate(new Template(Settings.EMPTY, null, null), null, null); + + final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); + CountDownLatch ctLatch = new CountDownLatch(1); + // Makes ure the foo template exists + service.putComponentTemplate( + "api", + randomBoolean(), + "foo", + TimeValue.timeValueSeconds(5), + ct, + ActionListener.wrap(r -> ctLatch.countDown(), e -> { + logger.error("unexpected error", e); + fail("unexpected error"); + }) + ); + ctLatch.await(5, TimeUnit.SECONDS); + InvalidIndexTemplateException e = expectThrows(InvalidIndexTemplateException.class, () -> { + CountDownLatch latch = new CountDownLatch(1); + AtomicReference err = new AtomicReference<>(); + service.putIndexTemplateV2( + "api", + randomBoolean(), + "template", + TimeValue.timeValueSeconds(30), + template, + ActionListener.wrap(r -> fail("should have failed!"), exception -> { + err.set(exception); + latch.countDown(); + }) + ); + latch.await(5, TimeUnit.SECONDS); + if (err.get() != null) { + throw err.get(); + } + }); + + assertThat(e.name(), equalTo("template")); + assertThat(e.getMessage(), containsString("missing component templates [fail] that does not exist")); + } + private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { ThreadPool testThreadPool = mock(ThreadPool.class); ClusterService clusterService = ClusterServiceUtils.createClusterService(testThreadPool); @@ -2200,6 +2346,6 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { } public static void assertTemplatesEqual(ComposableIndexTemplate actual, ComposableIndexTemplate expected) { 
- assertTrue(Objects.equals(actual, expected)); + assertEquals(actual, expected); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java index a0d64321041f7..ff9659235f2d3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java @@ -693,6 +693,9 @@ static List migrateComposableTemplates(Metadata.Builder mb, ClusterState migratedComposableTemplateBuilder.metadata(composableTemplate.metadata()); migratedComposableTemplateBuilder.dataStreamTemplate(composableTemplate.getDataStreamTemplate()); migratedComposableTemplateBuilder.allowAutoCreate(composableTemplate.getAllowAutoCreate()); + migratedComposableTemplateBuilder.ignoreMissingComponentTemplates( + composableTemplate.getIgnoreMissingComponentTemplates() + ); mb.put(templateEntry.getKey(), migratedComposableTemplateBuilder.build()); migratedComposableTemplates.add(templateEntry.getKey()); From 610d507a58b7b9149693059c81ea9998b6ed610a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 1 Feb 2023 08:15:43 -0800 Subject: [PATCH 37/63] Fix BWC tests when FIPS is enabled --- .../gradle/internal/ElasticsearchTestBasePlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 854dc6d204382..c6758092b17ec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -89,7 +89,7 @@ public void execute(Task t) { test.getJvmArgumentProviders().add(nonInputProperties); test.getExtensions().add("nonInputProperties", nonInputProperties); - test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName())); + test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); test.exclude("**/*$*.class"); From 0857e41cf1037f20a5773d3e53337b00653dc0e4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 1 Feb 2023 08:57:26 -0800 Subject: [PATCH 38/63] Attempt to fix windows failures --- .../test/cluster/local/LocalClusterFactory.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 963566f52e8a9..5f43bb8aa71b6 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -79,13 +79,13 @@ public LocalClusterHandle create(LocalClusterSpec spec) { public class Node { private final LocalNodeSpec spec; private final Path workingDir; - private final Path distributionDir; private final Path repoDir; private final Path dataDir; private final Path logsDir; private final Path configDir; private final Path tempDir; + private Path distributionDir; private Version currentVersion; private Process process = null; private DistributionDescriptor distributionDescriptor; @@ -93,7 +93,6 @@ public class Node { public Node(LocalNodeSpec spec) { this.spec = spec; this.workingDir = 
baseWorkingDir.resolve(spec.getCluster().getName()).resolve(spec.getName()); - this.distributionDir = workingDir.resolve("distro"); // location of es distribution files, typically hard-linked this.repoDir = baseWorkingDir.resolve("repo"); this.dataDir = workingDir.resolve("data"); this.logsDir = workingDir.resolve("logs"); @@ -111,6 +110,11 @@ public synchronized void start(Version version) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); + distributionDir = OS.conditional( + // Use per-version distribution directories on Windows to avoid cleanup failures + c -> c.onWindows(() -> workingDir.resolve("distro").resolve(distributionDescriptor.getVersion().toString())) + .onUnix(() -> workingDir.resolve("distro")) + ); initializeWorkingDirectory(currentVersion != null); createConfigDirectory(); copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install From 737a993048c89ce82154d304fa1d6979fe5a52d2 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 1 Feb 2023 12:01:52 -0600 Subject: [PATCH 39/63] Consolidate google-oauth-client to latest version (#91722) related: #87800 fixes #90126 --- build-tools-internal/version.properties | 6 ++---- docs/changelog/91722.yaml | 5 +++++ gradle/verification-metadata.xml | 5 ----- modules/repository-gcs/build.gradle | 2 +- plugins/discovery-gce/build.gradle | 3 +-- 5 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/91722.yaml diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 2c51d98eedd89..270d557dfd209 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -14,12 +14,10 @@ supercsv = 2.4.0 log4j = 2.19.0 slf4j = 1.6.2 ecsLogging = 1.2.0 - jna = 5.10.0 - netty = 4.1.86.Final - -commons_lang3 
= 3.9 +commons_lang3 = 3.9 +google_oauth_client = 1.34.1 antlr4 = 4.11.1 # when updating this version, you need to ensure compatibility with: diff --git a/docs/changelog/91722.yaml b/docs/changelog/91722.yaml new file mode 100644 index 0000000000000..ff403d33f7e44 --- /dev/null +++ b/docs/changelog/91722.yaml @@ -0,0 +1,5 @@ +pr: 91722 +summary: Consolidate google-oauth-client to latest version +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e0cfa911e80d7..88b574e2c1972 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -699,11 +699,6 @@ - - - - - diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index 4772ba7310b0b..4f878eb9d79de 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -44,7 +44,7 @@ dependencies { api 'com.google.api.grpc:proto-google-iam-v1:1.6.2' api 'com.google.auth:google-auth-library-credentials:1.11.0' api 'com.google.auth:google-auth-library-oauth2-http:1.11.0' - api 'com.google.oauth-client:google-oauth-client:1.34.1' + api "com.google.oauth-client:google-oauth-client:${versions.google_oauth_client}" api 'com.google.api-client:google-api-client:2.1.1' api 'com.google.http-client:google-http-client:1.42.3' api 'com.google.http-client:google-http-client-gson:1.42.3' diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 89e53f94853e2..75a8095ef412b 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -9,8 +9,7 @@ esplugin { versions << [ 'google' : '1.41.1', 'google_api_client' : '1.33.1', - 'api_services_compute': 'v1-rev20220322-1.32.1', - 'google_oauth_client' : '1.33.0', + 'api_services_compute': 'v1-rev20220322-1.32.1' ] dependencies { From 3340a54e33086f747ca6122b8c4f566acb22788c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 1 Feb 2023 13:31:46 -0500 
Subject: [PATCH 40/63] Add new query_vector_builder option to knn search clause (#93331) This adds a new option to the knn search clause called query_vector_builder. This is a pluggable configuration that allows the query_vector created or retrieved. --- docs/changelog/93331.yaml | 5 + docs/reference/search/search.asciidoc | 8 +- .../elasticsearch/plugins/SearchPlugin.java | 42 +++++ .../elasticsearch/search/SearchModule.java | 14 ++ .../search/vectors/KnnSearchBuilder.java | 155 ++++++++++++++++- .../search/vectors/QueryVectorBuilder.java | 34 ++++ .../AbstractQueryVectorBuilderTestCase.java | 164 ++++++++++++++++++ .../search/vectors/KnnSearchBuilderTests.java | 83 ++++++++- .../vectors/QueryVectorBuilderTests.java | 67 +++++++ .../vectors/TestQueryVectorBuilderPlugin.java | 114 ++++++++++++ 10 files changed, 675 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/93331.yaml create mode 100644 server/src/main/java/org/elasticsearch/search/vectors/QueryVectorBuilder.java create mode 100644 server/src/test/java/org/elasticsearch/search/vectors/AbstractQueryVectorBuilderTestCase.java create mode 100644 server/src/test/java/org/elasticsearch/search/vectors/QueryVectorBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java diff --git a/docs/changelog/93331.yaml b/docs/changelog/93331.yaml new file mode 100644 index 0000000000000..5dd1cddc7207c --- /dev/null +++ b/docs/changelog/93331.yaml @@ -0,0 +1,5 @@ +pr: 93331 +summary: Add new `query_vector_builder` option to knn search clause +area: Search +type: enhancement +issues: [] diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index d856949e9a478..63eebb225e0e3 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -506,8 +506,14 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-k] 
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] `query_vector`:: -(Required, array of floats) +(Optional, array of floats) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] + +`query_vector_builder`:: +(Optional, object) +A configuration object indicating how to build a query_vector before executing the request. You must provide +a `query_vector_builder` or `query_vector`, but not both. + ==== [[search-api-min-score]] diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index 6067768715aa1..bcd82b18470ca 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.vectors.QueryVectorBuilder; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContent; @@ -73,6 +74,14 @@ default List> getSignificanceHeuristics() { return emptyList(); } + /** + * The new {@link QueryVectorBuilder}s defined by this plugin. {@linkplain QueryVectorBuilder}s can be used within a kNN + * search to build the query vector instead of having the user provide the vector directly + */ + default List> getQueryVectorBuilders() { + return emptyList(); + } + /** * The new {@link FetchSubPhase}s defined by this plugin. */ @@ -592,4 +601,37 @@ public Map getHighlighters() { return highlighters; } } + + /** + * Specification of custom {@link QueryVectorBuilder}. + */ + class QueryVectorBuilderSpec extends SearchExtensionSpec> { + /** + * Specification of custom {@link QueryVectorBuilder}. + * + * @param name holds the names by which this query vector builder might be parsed. 
+ * The {@link ParseField#getPreferredName()} is special as it + * is the name by under which the reader is registered. So it is the name that the builder should use as its + * {@link NamedWriteable#getWriteableName()} too. + * @param reader the reader registered for this query vector builder. Typically a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser the reads the query vector builder from xcontent + */ + public QueryVectorBuilderSpec(ParseField name, Writeable.Reader reader, BiFunction parser) { + super(name, reader, parser); + } + + /** + * Specification of custom {@link QueryVectorBuilder}. + * + * @param name the name by which this query vector builder might be parsed or deserialized. + * Make sure that the query builder returns this name for {@link NamedWriteable#getWriteableName()}. + * @param reader the reader registered for this query vector builder. Typically a reference to a constructor that takes a + * {@link StreamInput} + * @param parser the parser the reads the query vector builder from xcontent + */ + public QueryVectorBuilderSpec(String name, Writeable.Reader reader, BiFunction parser) { + super(name, reader, parser); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index fa2699a30b897..f7f9e5ce66511 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -82,6 +82,7 @@ import org.elasticsearch.plugins.SearchPlugin.FetchPhaseConstructionContext; import org.elasticsearch.plugins.SearchPlugin.PipelineAggregationSpec; import org.elasticsearch.plugins.SearchPlugin.QuerySpec; +import org.elasticsearch.plugins.SearchPlugin.QueryVectorBuilderSpec; import org.elasticsearch.plugins.SearchPlugin.RescorerSpec; import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec; import 
org.elasticsearch.plugins.SearchPlugin.SearchExtSpec; @@ -244,6 +245,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; +import org.elasticsearch.search.vectors.QueryVectorBuilder; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -305,6 +307,7 @@ public SearchModule(Settings settings, List plugins) { registerSorts(); registerValueFormats(); registerSignificanceHeuristics(plugins); + registerQueryVectorBuilders(plugins); this.valuesSourceRegistry = registerAggregations(plugins); registerPipelineAggregations(plugins); registerFetchSubPhases(plugins); @@ -980,6 +983,17 @@ private void registerSignificanceHeuristic(Sig ); } + private void registerQueryVectorBuilders(List plugins) { + registerFromPlugin(plugins, SearchPlugin::getQueryVectorBuilders, this::registerQueryVectorBuilder); + } + + private void registerQueryVectorBuilder(QueryVectorBuilderSpec spec) { + namedXContents.add(new NamedXContentRegistry.Entry(QueryVectorBuilder.class, spec.getName(), p -> spec.getParser().apply(p, null))); + namedWriteables.add( + new NamedWriteableRegistry.Entry(QueryVectorBuilder.class, spec.getName().getPreferredName(), spec.getReader()) + ); + } + private void registerFetchSubPhases(List plugins) { registerFetchSubPhase(new ExplainPhase()); registerFetchSubPhase(new StoredFieldsPhase()); diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java index bed980e531134..e650aa1992110 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java @@ -8,6 +8,9 @@ package org.elasticsearch.search.vectors; +import 
org.apache.lucene.util.SetOnce; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,8 +30,11 @@ import java.util.Arrays; import java.util.List; import java.util.Objects; +import java.util.function.Supplier; +import static org.elasticsearch.common.Strings.format; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Defines a kNN search to run in the search request. @@ -39,6 +45,7 @@ public class KnnSearchBuilder implements Writeable, ToXContentFragment, Rewritea public static final ParseField K_FIELD = new ParseField("k"); public static final ParseField NUM_CANDS_FIELD = new ParseField("num_candidates"); public static final ParseField QUERY_VECTOR_FIELD = new ParseField("query_vector"); + public static final ParseField QUERY_VECTOR_BUILDER_FIELD = new ParseField("query_vector_builder"); public static final ParseField FILTER_FIELD = new ParseField("filter"); public static final ParseField BOOST_FIELD = AbstractQueryBuilder.BOOST_FIELD; @@ -46,18 +53,28 @@ public class KnnSearchBuilder implements Writeable, ToXContentFragment, Rewritea @SuppressWarnings("unchecked") // TODO optimize parsing for when BYTE values are provided List vector = (List) args[1]; - float[] vectorArray = new float[vector.size()]; - for (int i = 0; i < vector.size(); i++) { - vectorArray[i] = vector.get(i); + final float[] vectorArray; + if (vector != null) { + vectorArray = new float[vector.size()]; + for (int i = 0; i < vector.size(); i++) { + vectorArray[i] = vector.get(i); + } + } else { + vectorArray = null; } - return new KnnSearchBuilder((String) args[0], vectorArray, (int) args[2], (int) args[3]); + return new KnnSearchBuilder((String) args[0], vectorArray, 
(QueryVectorBuilder) args[4], (int) args[2], (int) args[3]); }); static { PARSER.declareString(constructorArg(), FIELD_FIELD); - PARSER.declareFloatArray(constructorArg(), QUERY_VECTOR_FIELD); + PARSER.declareFloatArray(optionalConstructorArg(), QUERY_VECTOR_FIELD); PARSER.declareInt(constructorArg(), K_FIELD); PARSER.declareInt(constructorArg(), NUM_CANDS_FIELD); + PARSER.declareNamedObject( + optionalConstructorArg(), + (p, c, n) -> p.namedObject(QueryVectorBuilder.class, n, c), + QUERY_VECTOR_BUILDER_FIELD + ); PARSER.declareFieldArray( KnnSearchBuilder::addFilterQueries, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), @@ -73,6 +90,8 @@ public static KnnSearchBuilder fromXContent(XContentParser parser) throws IOExce final String field; final float[] queryVector; + final QueryVectorBuilder queryVectorBuilder; + private final Supplier querySupplier; final int k; final int numCands; final List filterQueries; @@ -87,6 +106,27 @@ public static KnnSearchBuilder fromXContent(XContentParser parser) throws IOExce * @param numCands the number of nearest neighbor candidates to consider per shard */ public KnnSearchBuilder(String field, float[] queryVector, int k, int numCands) { + this(field, Objects.requireNonNull(queryVector, format("[%s] cannot be null", QUERY_VECTOR_FIELD)), null, k, numCands); + } + + /** + * Defines a kNN search where the query vector will be provided by the queryVectorBuilder + * @param field the name of the vector field to search against + * @param queryVectorBuilder the query vector builder + * @param k the final number of nearest neighbors to return as top hits + * @param numCands the number of nearest neighbor candidates to consider per shard + */ + public KnnSearchBuilder(String field, QueryVectorBuilder queryVectorBuilder, int k, int numCands) { + this( + field, + null, + Objects.requireNonNull(queryVectorBuilder, format("[%s] cannot be null", QUERY_VECTOR_BUILDER_FIELD.getPreferredName())), + k, + numCands + ); + } + + private 
KnnSearchBuilder(String field, float[] queryVector, QueryVectorBuilder queryVectorBuilder, int k, int numCands) { if (k < 1) { throw new IllegalArgumentException("[" + K_FIELD.getPreferredName() + "] must be greater than 0"); } @@ -98,11 +138,41 @@ public KnnSearchBuilder(String field, float[] queryVector, int k, int numCands) if (numCands > NUM_CANDS_LIMIT) { throw new IllegalArgumentException("[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot exceed [" + NUM_CANDS_LIMIT + "]"); } + if (queryVector == null && queryVectorBuilder == null) { + throw new IllegalArgumentException( + format( + "either [%s] or [%s] must be provided", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + QUERY_VECTOR_FIELD.getPreferredName() + ) + ); + } + if (queryVector != null && queryVectorBuilder != null) { + throw new IllegalArgumentException( + format( + "cannot provide both [%s] and [%s]", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + QUERY_VECTOR_FIELD.getPreferredName() + ) + ); + } this.field = field; - this.queryVector = queryVector; + this.queryVector = queryVector == null ? 
new float[0] : queryVector; + this.queryVectorBuilder = queryVectorBuilder; this.k = k; this.numCands = numCands; this.filterQueries = new ArrayList<>(); + this.querySupplier = null; + } + + private KnnSearchBuilder(String field, Supplier querySupplier, int k, int numCands, List filterQueries) { + this.field = field; + this.queryVector = new float[0]; + this.queryVectorBuilder = null; + this.k = k; + this.numCands = numCands; + this.filterQueries = filterQueries; + this.querySupplier = querySupplier; } public KnnSearchBuilder(StreamInput in) throws IOException { @@ -112,6 +182,12 @@ public KnnSearchBuilder(StreamInput in) throws IOException { this.queryVector = in.readFloatArray(); this.filterQueries = in.readNamedWriteableList(QueryBuilder.class); this.boost = in.readFloat(); + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + this.queryVectorBuilder = in.readOptionalNamedWriteable(QueryVectorBuilder.class); + } else { + this.queryVectorBuilder = null; + } + this.querySupplier = null; } public int k() { @@ -140,6 +216,32 @@ public KnnSearchBuilder boost(float boost) { @Override public KnnSearchBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (querySupplier != null) { + if (querySupplier.get() == null) { + return this; + } + return new KnnSearchBuilder(field, querySupplier.get(), k, numCands).boost(boost).addFilterQueries(filterQueries); + } + if (queryVectorBuilder != null) { + SetOnce toSet = new SetOnce<>(); + ctx.registerAsyncAction((c, l) -> queryVectorBuilder.buildVector(c, ActionListener.wrap(v -> { + toSet.set(v); + if (v == null) { + l.onFailure( + new IllegalArgumentException( + format( + "[%s] with name [%s] returned null query_vector", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + queryVectorBuilder.getWriteableName() + ) + ) + ); + return; + } + l.onResponse(null); + }, l::onFailure))); + return new KnnSearchBuilder(field, toSet::get, k, numCands, filterQueries).boost(boost); + } boolean changed = false; List 
rewrittenQueries = new ArrayList<>(filterQueries.size()); for (QueryBuilder query : filterQueries) { @@ -156,6 +258,9 @@ public KnnSearchBuilder rewrite(QueryRewriteContext ctx) throws IOException { } public KnnVectorQueryBuilder toQueryBuilder() { + if (queryVectorBuilder != null) { + throw new IllegalArgumentException("missing rewrite"); + } return new KnnVectorQueryBuilder(field, queryVector, numCands).boost(boost).addFilterQueries(filterQueries); } @@ -168,21 +273,38 @@ public boolean equals(Object o) { && numCands == that.numCands && Objects.equals(field, that.field) && Arrays.equals(queryVector, that.queryVector) + && Objects.equals(queryVectorBuilder, that.queryVectorBuilder) + && Objects.equals(querySupplier, that.querySupplier) && Objects.equals(filterQueries, that.filterQueries) && boost == that.boost; } @Override public int hashCode() { - return Objects.hash(field, k, numCands, Arrays.hashCode(queryVector), Objects.hashCode(filterQueries), boost); + return Objects.hash( + field, + k, + numCands, + querySupplier, + queryVectorBuilder, + Arrays.hashCode(queryVector), + Objects.hashCode(filterQueries), + boost + ); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FIELD_FIELD.getPreferredName(), field) .field(K_FIELD.getPreferredName(), k) - .field(NUM_CANDS_FIELD.getPreferredName(), numCands) - .array(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + .field(NUM_CANDS_FIELD.getPreferredName(), numCands); + if (queryVectorBuilder != null) { + builder.startObject(QUERY_VECTOR_BUILDER_FIELD.getPreferredName()); + builder.field(queryVectorBuilder.getWriteableName(), queryVectorBuilder); + builder.endObject(); + } else { + builder.array(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); + } if (filterQueries.isEmpty() == false) { builder.startArray(FILTER_FIELD.getPreferredName()); @@ -201,11 +323,26 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
@Override public void writeTo(StreamOutput out) throws IOException { + if (querySupplier != null) { + throw new IllegalStateException("missing a rewriteAndFetch?"); + } out.writeString(field); out.writeVInt(k); out.writeVInt(numCands); out.writeFloatArray(queryVector); out.writeNamedWriteableList(filterQueries); out.writeFloat(boost); + if (out.getTransportVersion().before(TransportVersion.V_8_7_0) && queryVectorBuilder != null) { + throw new IllegalArgumentException( + format( + "cannot serialize [%s] to older node of version [%s]", + QUERY_VECTOR_BUILDER_FIELD.getPreferredName(), + out.getTransportVersion() + ) + ); + } + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + out.writeOptionalNamedWriteable(queryVectorBuilder); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/QueryVectorBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/QueryVectorBuilder.java new file mode 100644 index 0000000000000..aa35657c60543 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/vectors/QueryVectorBuilder.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; +import org.elasticsearch.xcontent.ToXContentObject; + +/** + * Provides a mechanism for building a KNN query vector in an asynchronous manner during the rewrite phase + */ +public interface QueryVectorBuilder extends VersionedNamedWriteable, ToXContentObject { + + /** + * Method for building a vector via the client. 
This method is called during RerwiteAndFetch. + * Typical implementation for this method will: + * 1. call some asynchronous client action + * 2. Handle failure/success for that action (usually passing failure to the provided listener) + * 3. Parse the success case and extract the query vector + * 4. Pass the extracted query vector to the provided listener + * + * @param client for performing asynchronous actions against the cluster + * @param listener listener to accept the created vector + */ + void buildVector(Client client, ActionListener listener); + +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractQueryVectorBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractQueryVectorBuilderTestCase.java new file mode 100644 index 0000000000000..c889f71324cd0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractQueryVectorBuilderTestCase.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +/** + * Tests a query vector builder + * @param the query vector builder type to test + */ +public abstract class AbstractQueryVectorBuilderTestCase extends AbstractXContentSerializingTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + private NamedXContentRegistry namedXContentRegistry; + + protected List additionalPlugins() { + return List.of(); + } + + @Before + public void registerNamedXContents() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, additionalPlugins()); + namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); + namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return namedXContentRegistry; + } + + @Override + protected 
NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } + + // Just in case the vector builder needs to know the expected value when testing + protected T createTestInstance(float[] expected) { + return createTestInstance(); + } + + public final void testKnnSearchBuilderXContent() throws Exception { + AbstractXContentTestCase.XContentTester tester = AbstractXContentTestCase.xContentTester( + this::createParser, + () -> new KnnSearchBuilder(randomAlphaOfLength(10), createTestInstance(), 5, 10), + getToXContentParams(), + KnnSearchBuilder::fromXContent + ); + tester.test(); + } + + public final void testKnnSearchBuilderWireSerialization() throws IOException { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + KnnSearchBuilder searchBuilder = new KnnSearchBuilder(randomAlphaOfLength(10), createTestInstance(), 5, 10); + KnnSearchBuilder serialized = copyWriteable( + searchBuilder, + getNamedWriteableRegistry(), + KnnSearchBuilder::new, + TransportVersion.CURRENT + ); + assertThat(serialized, equalTo(searchBuilder)); + assertNotSame(serialized, searchBuilder); + } + } + + public final void testKnnSearchRewrite() throws Exception { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + float[] expected = randomVector(randomIntBetween(10, 1024)); + T queryVectorBuilder = createTestInstance(expected); + KnnSearchBuilder searchBuilder = new KnnSearchBuilder(randomAlphaOfLength(10), queryVectorBuilder, 5, 10); + KnnSearchBuilder serialized = copyWriteable( + searchBuilder, + getNamedWriteableRegistry(), + KnnSearchBuilder::new, + TransportVersion.CURRENT + ); + try (NoOpClient client = new AssertingClient(expected, queryVectorBuilder)) { + QueryRewriteContext context = new QueryRewriteContext(null, null, client, null); + PlainActionFuture future = new PlainActionFuture<>(); + Rewriteable.rewriteAndFetch(randomFrom(serialized, searchBuilder), context, future); + KnnSearchBuilder rewritten = future.get(); + assertThat(rewritten.queryVector, 
equalTo(expected)); + assertThat(rewritten.queryVectorBuilder, nullValue()); + } + } + } + + public final void testVectorFetch() throws Exception { + float[] expected = randomVector(randomIntBetween(10, 1024)); + T queryVectorBuilder = createTestInstance(expected); + try (NoOpClient client = new AssertingClient(expected, queryVectorBuilder)) { + PlainActionFuture future = new PlainActionFuture<>(); + queryVectorBuilder.buildVector(client, future); + assertThat(future.get(), equalTo(expected)); + } + } + + /** + * Assert that the client action request is correct given this provided random builder + * @param request The built request to be executed by the client + * @param builder The builder used when generating this request + */ + abstract void doAssertClientRequest(ActionRequest request, T builder); + + /** + * Create a response given this expected array that is acceptable to the query builder + * @param array The expected final array + * @param builder The original randomly built query vector builder + * @return An action response to be handled by the query vector builder + */ + abstract ActionResponse createResponse(float[] array, T builder); + + private class AssertingClient extends NoOpClient { + + private final float[] array; + private final T queryVectorBuilder; + + AssertingClient(float[] array, T queryVectorBuilder) { + super("query_vector_builder_tests"); + this.array = array; + this.queryVectorBuilder = queryVectorBuilder; + } + + @Override + @SuppressWarnings("unchecked") + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + doAssertClientRequest(request, queryVectorBuilder); + listener.onResponse((Response) createResponse(array, queryVectorBuilder)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java index d33505796bf32..9d66f06cc9bf3 100644 --- 
a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java @@ -8,25 +8,37 @@ package org.elasticsearch.search.vectors; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; public class KnnSearchBuilderTests extends AbstractXContentSerializingTestCase { private NamedWriteableRegistry namedWriteableRegistry; @@ -161,11 +173,80 @@ public void testInvalidK() { assertThat(e.getMessage(), containsString("[k] must be greater than 0")); } - private static float[] randomVector(int dim) { + public void testRewrite() throws Exception { + float[] 
expectedArray = randomVector(randomIntBetween(10, 1024)); + KnnSearchBuilder searchBuilder = new KnnSearchBuilder( + "field", + new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(expectedArray), + 5, + 10 + ); + searchBuilder.boost(randomFloat()); + searchBuilder.addFilterQueries(List.of(new RewriteableQuery())); + + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null); + PlainActionFuture future = new PlainActionFuture<>(); + Rewriteable.rewriteAndFetch(searchBuilder, context, future); + KnnSearchBuilder rewritten = future.get(); + + assertThat(rewritten.field, equalTo(searchBuilder.field)); + assertThat(rewritten.boost, equalTo(searchBuilder.boost)); + assertThat(rewritten.queryVector, equalTo(expectedArray)); + assertThat(rewritten.queryVectorBuilder, nullValue()); + assertThat(rewritten.filterQueries, hasSize(1)); + assertThat(((RewriteableQuery) rewritten.filterQueries.get(0)).rewrites, equalTo(1)); + } + + static float[] randomVector(int dim) { float[] vector = new float[dim]; for (int i = 0; i < vector.length; i++) { vector[i] = randomFloat(); } return vector; } + + private static class RewriteableQuery extends AbstractQueryBuilder { + private int rewrites; + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + throw new UnsupportedOperationException(); + } + + @Override + protected void doWriteTo(StreamOutput out) { + throw new UnsupportedOperationException(); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean doEquals(RewriteableQuery other) { + return true; + } + + @Override + protected int doHashCode() { + return 
Objects.hashCode(RewriteableQuery.class); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { + rewrites++; + return this; + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/QueryVectorBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/QueryVectorBuilderTests.java new file mode 100644 index 0000000000000..e8b879948fb79 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/QueryVectorBuilderTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; + +/** + * Test the query vector builder logic with a test plugin + */ +public class QueryVectorBuilderTests extends AbstractQueryVectorBuilderTestCase { + + @Override + protected List additionalPlugins() { + return List.of(new TestQueryVectorBuilderPlugin()); + } + + @Override + protected Writeable.Reader instanceReader() { + return TestQueryVectorBuilderPlugin.TestQueryVectorBuilder::new; + } + + @Override + protected TestQueryVectorBuilderPlugin.TestQueryVectorBuilder createTestInstance() { + return new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(randomList(2, 1024, ESTestCase::randomFloat)); + } + + @Override + protected TestQueryVectorBuilderPlugin.TestQueryVectorBuilder 
createTestInstance(float[] expected) { + return new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(expected); + } + + @Override + protected TestQueryVectorBuilderPlugin.TestQueryVectorBuilder mutateInstance( + TestQueryVectorBuilderPlugin.TestQueryVectorBuilder instance + ) throws IOException { + return createTestInstance(); + } + + @Override + protected TestQueryVectorBuilderPlugin.TestQueryVectorBuilder doParseInstance(XContentParser parser) throws IOException { + return TestQueryVectorBuilderPlugin.TestQueryVectorBuilder.PARSER.apply(parser, null); + } + + @Override + protected void doAssertClientRequest(ActionRequest request, TestQueryVectorBuilderPlugin.TestQueryVectorBuilder builder) { + // Nothing to assert here as this object does not make client calls + } + + @Override + protected ActionResponse createResponse(float[] array, TestQueryVectorBuilderPlugin.TestQueryVectorBuilder builder) { + return new ActionResponse.Empty(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java new file mode 100644 index 0000000000000..bdec097f355f3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * A SearchPlugin to exercise query vector builder + */ +class TestQueryVectorBuilderPlugin implements SearchPlugin { + + static class TestQueryVectorBuilder implements QueryVectorBuilder { + private static final String NAME = "test_query_vector_builder"; + + private static final ParseField QUERY_VECTOR = new ParseField("query_vector"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME + "_parser", + true, + a -> new TestQueryVectorBuilder((List) a[0]) + ); + + static { + PARSER.declareFloatArray(ConstructingObjectParser.constructorArg(), QUERY_VECTOR); + } + + private List vectorToBuild; + + TestQueryVectorBuilder(List vectorToBuild) { + this.vectorToBuild = vectorToBuild; + } + + TestQueryVectorBuilder(float[] expected) { + this.vectorToBuild = new ArrayList<>(expected.length); + for (float f : expected) { + vectorToBuild.add(f); + } + } + + TestQueryVectorBuilder(StreamInput in) throws IOException { + this.vectorToBuild = in.readList(StreamInput::readFloat); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(QUERY_VECTOR.getPreferredName(), vectorToBuild).endObject(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public 
TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(vectorToBuild, StreamOutput::writeFloat); + } + + @Override + public void buildVector(Client client, ActionListener listener) { + float[] response = new float[vectorToBuild.size()]; + int i = 0; + for (Float f : vectorToBuild) { + response[i++] = f; + } + listener.onResponse(response); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestQueryVectorBuilder that = (TestQueryVectorBuilder) o; + return Objects.equals(vectorToBuild, that.vectorToBuild); + } + + @Override + public int hashCode() { + return Objects.hash(vectorToBuild); + } + } + + @Override + public List> getQueryVectorBuilders() { + return List.of( + new QueryVectorBuilderSpec<>(TestQueryVectorBuilder.NAME, TestQueryVectorBuilder::new, TestQueryVectorBuilder.PARSER::apply) + ); + } +} From 641cc0e5046bd471f0224f52e295049d7b93ed4c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 1 Feb 2023 13:32:13 -0500 Subject: [PATCH 41/63] Add `term` query support to rank_features mapped field (#93247) This adds term query capabilities for rank_features fields. term queries against rank_features are not scored in the typical way as regular fields. This is because the stored feature values take advantage of the term frequency storage mechanism, and thus regular BM25 does not work. Instead, a term query against a rank_features field is very similar to linear rank_feature query. If more complicated combinations of features and values are required, the rank_feature query should be used. 
--- docs/changelog/93247.yaml | 5 + .../mapping/types/rank-features.asciidoc | 5 +- .../RankFeaturesMapperIntegrationIT.java | 116 ++++++++++++++++++ .../extras/RankFeaturesFieldMapper.java | 18 ++- 4 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/93247.yaml create mode 100644 modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java diff --git a/docs/changelog/93247.yaml b/docs/changelog/93247.yaml new file mode 100644 index 0000000000000..e32d2acde2701 --- /dev/null +++ b/docs/changelog/93247.yaml @@ -0,0 +1,5 @@ +pr: 93247 +summary: Add `term` query support to `rank_features` mapped field +area: Search +type: enhancement +issues: [] diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index 8be5e00185788..b54e99ede3fae 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -83,7 +83,10 @@ NOTE: `rank_features` fields only support single-valued features and strictly positive values. Multi-valued fields and zero or negative values will be rejected. NOTE: `rank_features` fields do not support sorting or aggregating and may -only be queried using <> queries. +only be queried using <> or <> queries. + +NOTE: <> queries on `rank_features` fields are scored by multiplying the matched +stored feature value by the provided `boost`. NOTE: `rank_features` fields only preserve 9 significant bits for the precision, which translates to a relative error of about 0.4%. 
diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java new file mode 100644 index 0000000000000..badc2dd568f57 --- /dev/null +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class RankFeaturesMapperIntegrationIT extends ESIntegTestCase { + + private static final String LOWER_RANKED_FEATURE = "ten"; + private static final String HIGHER_RANKED_FEATURE = "twenty"; + private static final String INDEX_NAME = "rank_feature_test"; + private static final String FIELD_NAME = "all_rank_features"; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MapperExtrasPlugin.class); + } + + public void testRankFeaturesTermQuery() 
throws IOException { + init(); + SearchResponse response = client().prepareSearch(INDEX_NAME) + .setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(20f)); + } + + response = client().prepareSearch(INDEX_NAME) + .setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE).boost(100f)) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(2000f)); + } + + response = client().prepareSearch(INDEX_NAME) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)) + .should(QueryBuilders.termQuery(FIELD_NAME, LOWER_RANKED_FEATURE).boost(3f)) + .minimumShouldMatch(1) + ) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + for (SearchHit hit : response.getHits().getHits()) { + if (hit.getId().equals("all")) { + assertThat(hit.getScore(), equalTo(50f)); + } + if (hit.getId().equals("lower")) { + assertThat(hit.getScore(), equalTo(30f)); + } + if (hit.getId().equals("higher")) { + assertThat(hit.getScore(), equalTo(20f)); + } + } + + response = client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, "missing_feature")).get(); + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + } + + private void init() throws IOException { + Settings.Builder settings = Settings.builder(); + settings.put(indexSettings()); + prepareCreate(INDEX_NAME).setSettings(settings) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("all_rank_features") + .field("type", "rank_features") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + BulkResponse bulk = 
client().prepareBulk() + .add( + client().prepareIndex(INDEX_NAME) + .setId("all") + .setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10, HIGHER_RANKED_FEATURE, 20))) + ) + .add(client().prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) + .add( + client().prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20))) + ) + .get(); + assertFalse(bulk.buildFailureMessage(), bulk.hasFailures()); + assertThat(refresh().getFailedShards(), equalTo(0)); + } + +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index aea9fe9b24939..4187963e061ce 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -27,6 +28,8 @@ import java.io.IOException; import java.util.Map; +import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; + /** * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse * vector of features. 
@@ -77,7 +80,7 @@ public static final class RankFeaturesFieldType extends MappedFieldType { private final boolean positiveScoreImpact; public RankFeaturesFieldType(String name, Map meta, boolean positiveScoreImpact) { - super(name, false, false, false, TextSearchInfo.NONE, meta); + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); this.positiveScoreImpact = positiveScoreImpact; } @@ -86,10 +89,6 @@ public String typeName() { return CONTENT_TYPE; } - public boolean positiveScoreImpact() { - return positiveScoreImpact; - } - @Override public Query existsQuery(SearchExecutionContext context) { throw new IllegalArgumentException("[rank_features] fields do not support [exists] queries"); @@ -107,7 +106,14 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) @Override public Query termQuery(Object value, SearchExecutionContext context) { - throw new IllegalArgumentException("Queries on [rank_features] fields are not supported"); + return FeatureField.newLinearQuery(name(), indexedValueForSearch(value), DEFAULT_BOOST); + } + + private static String indexedValueForSearch(Object value) { + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } + return value.toString(); } } From 2332d882fb51a21857221ca304949c97d54b176c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 1 Feb 2023 11:51:42 -0800 Subject: [PATCH 42/63] Increase volume size of encryption at rest job --- .ci/jobs.t/elastic+elasticsearch+periodic+ear.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml index 47a8d4f48cc5c..de4886c9f8ebe 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml @@ -16,7 +16,7 @@ #!/bin/bash # Configure a dm-crypt volume backed by a file set -e - dd if=/dev/zero of=dm-crypt.img bs=1 count=0 seek=60GB + dd if=/dev/zero 
of=dm-crypt.img bs=1 count=0 seek=80GB + dd if=/dev/urandom of=key.secret bs=2k count=1 + LOOP=$(losetup -f) + sudo losetup $LOOP dm-crypt.img From 9aa67edf8e835c4af6b6bfb214d5ee1bd0d79f9d Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 2 Feb 2023 15:37:36 +1100 Subject: [PATCH 43/63] Support configuring SSL separately for remote access port (#93334) Though the remote access is implemented with the transport profile, its configuration and behaviour should not be tied to the default transport profile as other profiles are. Users should be able to enable or disable SSL separately for the remote access port and configure different values for all SSL settings. These settings can also have different defaults. This PR implements the above by: * Adds a new xpack.security.remote_cluster.ssl.enabled setting to control whether SSL is enabled separately for the remote access port * The above enabled setting defaults to true (unlike the default for transport SSL) * Client auth defaults to none for the remote access port * Separate server SSL configuration validation The PR also moves the remote access profile to be built first for more consistent error messages.
--- .../RemoteClusterPortSettingsIT.java | 12 +- .../elasticsearch/transport/TcpTransport.java | 8 +- .../xpack/core/XPackSettings.java | 11 ++ .../xpack/core/ssl/SSLService.java | 59 ++++++-- .../core/ssl/SslSettingsLoaderTests.java | 4 + .../security/qa/multi-cluster/build.gradle | 81 ++++++----- .../RemoteClusterSecuritySmokeIT.java | 9 ++ .../src/test/resources/ssl/README.asciidoc | 55 +++++++ .../test/resources/ssl/remote-cluster-ca.crt | 20 +++ .../test/resources/ssl/remote-cluster-ca.key | 30 ++++ .../src/test/resources/ssl/remote_cluster.crt | 22 +++ .../src/test/resources/ssl/remote_cluster.key | 30 ++++ .../src/test/resources/ssl/transport-ca.crt | 19 +++ .../src/test/resources/ssl/transport-ca.key | 30 ++++ .../src/test/resources/ssl/transport.crt | 22 +++ .../src/test/resources/ssl/transport.key | 30 ++++ .../transport/ProfileConfigurations.java | 101 +++++++++++-- .../netty4/SecurityNetty4Transport.java | 36 +++-- .../SecurityUsageTransportAction.java | 5 + .../SecurityServerTransportInterceptor.java | 24 +++- .../transport/ServerTransportFilter.java | 5 + .../transport/ProfileConfigurationsTests.java | 135 ++++++++++++++++-- .../SecurityInfoTransportActionTests.java | 10 ++ ...curityServerTransportInterceptorTests.java | 106 ++++++++++++++ .../xpack/ssl/SSLErrorMessageFileTests.java | 48 +++++++ ...> LegacyRemoteClusterSecuritySmokeIT.java} | 2 +- 26 files changed, 817 insertions(+), 97 deletions(-) create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/README.asciidoc create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.crt create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.key create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.crt create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.key create mode 100644 
x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.crt create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.key create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.crt create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.key rename x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/{RemoteClusterSecuritySmokeIT.java => LegacyRemoteClusterSecuritySmokeIT.java} (97%) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java index f1be526aebc08..2543bcdfc5f6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java @@ -26,8 +26,16 @@ public void testDirectlyConfiguringTransportProfileForRemoteClusterWillFailToSta final Settings.Builder builder = Settings.builder() .put(randomBoolean() ? 
masterNode() : dataOnlyNode()) .put("discovery.initial_state_timeout", "1s") - .put("remote_cluster.enabled", true) - .put("transport.profiles._remote_cluster.port", 9900); + .put("remote_cluster.enabled", true); + + // Test that the same error message is always reported for direct usage of the _remote_cluster profile + switch (randomIntBetween(0, 2)) { + case 0 -> builder.put("transport.profiles._remote_cluster.tcp.keep_alive", true); + case 1 -> builder.put("transport.profiles._remote_cluster.port", 9900); + default -> builder.put("transport.profiles._remote_cluster.port", 9900) + .put("transport.profiles._remote_cluster.tcp.keep_alive", true); + } + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> internalCluster().startNode(builder)); assertThat( e.getMessage(), diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 13e027cc94bf7..57e1962d505be 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -1018,6 +1018,11 @@ public final TransportStats getStats() { */ public static Set getProfileSettings(Settings settings) { HashSet profiles = new HashSet<>(); + // Process remote cluster port first so that errors are consistently reported if there + // is direct usage of the _remote_cluster profile + if (REMOTE_CLUSTER_PORT_ENABLED.get(settings)) { + profiles.add(RemoteClusterPortSettings.buildRemoteAccessProfileSettings(settings)); + } boolean isDefaultSet = false; for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { profiles.add(new ProfileSettings(settings, profile)); @@ -1028,9 +1033,6 @@ public static Set getProfileSettings(Settings settings) { if (isDefaultSet == false) { profiles.add(new ProfileSettings(settings, TransportSettings.DEFAULT_PROFILE)); } - if (REMOTE_CLUSTER_PORT_ENABLED.get(settings)) 
{ - profiles.add(RemoteClusterPortSettings.buildRemoteAccessProfileSettings(settings)); - } // Add the remote access profile return Collections.unmodifiableSet(profiles); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index f4f8dcb2b5385..8e2039efe241b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -100,6 +100,13 @@ private XPackSettings() { Setting.Property.NodeScope ); + /** Setting for enabling or disabling remote cluster access TLS. Defaults to true. */ + public static final Setting REMOTE_CLUSTER_SSL_ENABLED = Setting.boolSetting( + "xpack.security." + RemoteClusterPortSettings.REMOTE_CLUSTER_PREFIX + "ssl.enabled", + true, + Property.NodeScope + ); + /** Setting for enabling or disabling the reserved realm. Defaults to true */ public static final Setting RESERVED_REALM_ENABLED_SETTING = Setting.boolSetting( "xpack.security.authc.reserved_realm.enabled", @@ -237,6 +244,7 @@ public static Setting defaultStoredHashAlgorithmSetting(String key, Func public static final SslClientAuthenticationMode CLIENT_AUTH_DEFAULT = SslClientAuthenticationMode.REQUIRED; public static final SslClientAuthenticationMode HTTP_CLIENT_AUTH_DEFAULT = SslClientAuthenticationMode.NONE; + public static final SslClientAuthenticationMode REMOTE_CLUSTER_CLIENT_AUTH_DEFAULT = SslClientAuthenticationMode.NONE; public static final SslVerificationMode VERIFICATION_MODE_DEFAULT = SslVerificationMode.FULL; // http specific settings @@ -270,6 +278,9 @@ public static List> getAllSettings() { settings.add(DLS_FLS_ENABLED); settings.add(TRANSPORT_SSL_ENABLED); settings.add(HTTP_SSL_ENABLED); + if (TcpTransport.isUntrustedRemoteClusterEnabled()) { + settings.add(REMOTE_CLUSTER_SSL_ENABLED); + } 
settings.add(RESERVED_REALM_ENABLED_SETTING); settings.add(TOKEN_SERVICE_ENABLED_SETTING); settings.add(API_KEY_SERVICE_ENABLED_SETTING); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index af588cb175834..0af6e18824a38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.Environment; -import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; @@ -79,7 +78,9 @@ import javax.net.ssl.X509ExtendedTrustManager; import javax.security.auth.x500.X500Principal; +import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; +import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_SSL_ENABLED; /** * Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. 
All @@ -591,8 +592,8 @@ static Map getSSLSettingsMap(Settings settings) { sslSettingsMap.put(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX, settings.getByPrefix(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX)); sslSettingsMap.put(XPackSettings.TRANSPORT_SSL_PREFIX, settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX)); sslSettingsMap.putAll(getTransportProfileSSLSettings(settings)); - if (TcpTransport.isUntrustedRemoteClusterEnabled()) { - sslSettingsMap.put(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX, settings.getByPrefix(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); + if (REMOTE_CLUSTER_PORT_ENABLED.get(settings)) { + sslSettingsMap.put(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX, getRemoteClusterSslSettings(settings)); } return Collections.unmodifiableMap(sslSettingsMap); } @@ -615,6 +616,7 @@ Map loadSslConfigurations(Map sslSettingNames = settings.keySet().stream().filter(s -> s.startsWith(prefix)).sorted().toList(); @@ -656,6 +647,35 @@ private void validateServerConfiguration(String prefix) { } } + private void maybeValidateRemoteClusterServerConfiguration() { + if (REMOTE_CLUSTER_PORT_ENABLED.get(settings) == false) { + return; + } + final String prefix = "xpack.security.remote_cluster.ssl"; + final SslConfiguration sslConfiguration = getSSLConfiguration(prefix); + if (REMOTE_CLUSTER_SSL_ENABLED.get(settings)) { + if (isConfigurationValidForServerUsage(sslConfiguration) == false) { + final SSLConfigurationSettings configurationSettings = SSLConfigurationSettings.withPrefix(prefix + ".", false); + throwExceptionForMissingKeyMaterial(prefix, configurationSettings); + } + } + } + + private static void throwExceptionForMissingKeyMaterial(String prefix, SSLConfigurationSettings configurationSettings) { + throw new ElasticsearchSecurityException( + "invalid SSL configuration for " + + prefix + + " - server ssl configuration requires a key and certificate, but these have not been configured; " + + "you must set either [" + + 
configurationSettings.x509KeyPair.keystorePath.getKey() + + "], or both [" + + configurationSettings.x509KeyPair.keyPath.getKey() + + "] and [" + + configurationSettings.x509KeyPair.certificatePath.getKey() + + "]" + ); + } + /** * Returns information about each certificate that is referenced by any SSL configuration. * This includes certificates used for identity (with a private key) and those used for trust, but excludes @@ -881,6 +901,15 @@ private static Settings getHttpTransportSSLSettings(Settings settings) { return builder.build(); } + private static Settings getRemoteClusterSslSettings(Settings settings) { + final Settings remoteClusterSslSettings = settings.getByPrefix(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX); + final Settings.Builder builder = Settings.builder().put(remoteClusterSslSettings); + if (builder.get("client_authentication") == null) { + builder.put("client_authentication", XPackSettings.REMOTE_CLUSTER_CLIENT_AUTH_DEFAULT); + } + return builder.build(); + } + public SslConfiguration getHttpTransportSSLConfiguration() { return getSSLConfiguration(XPackSettings.HTTP_SSL_PREFIX); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoaderTests.java index 06a29c1b75ca7..4889a9b4937b8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoaderTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.ssl.KeyStoreUtil; import org.elasticsearch.common.ssl.PemKeyConfig; import org.elasticsearch.common.ssl.PemTrustConfig; +import org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.ssl.SslConfigurationKeys; import org.elasticsearch.common.ssl.SslKeyConfig; @@ -88,6 +89,7 @@ public void 
testRemoteClusterPortConfigurationIsInjectedWithDefaultsIfEnabled() assertThat(sslConfiguration.trustConfig().getClass().getSimpleName(), is("DefaultJdkTrustConfig")); assertThat(sslConfiguration.supportedProtocols(), equalTo(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS)); assertThat(sslConfiguration.supportedProtocols(), not(hasItem("TLSv1"))); + assertThat(sslConfiguration.clientAuth(), is(SslClientAuthenticationMode.NONE)); } /** @@ -104,12 +106,14 @@ public void testRemoteClusterPortConfigurationIsInjectedWithItsSettingsIfEnabled .put(RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED.getKey(), true) .put(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX + SslConfigurationKeys.KEYSTORE_PATH, path) .putList(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX + SslConfigurationKeys.PROTOCOLS, "TLSv1.3", "TLSv1.2") + .put(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX + SslConfigurationKeys.CLIENT_AUTH, "required") .setSecureSettings(secureSettings) .build(); Map settingsMap = SSLService.getSSLSettingsMap(testSettings); assertThat(settingsMap, hasKey(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); SslConfiguration sslConfiguration = getSslConfiguration(settingsMap.get(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); assertThat(sslConfiguration.supportedProtocols(), contains("TLSv1.3", "TLSv1.2")); + assertThat(sslConfiguration.clientAuth(), is(SslClientAuthenticationMode.REQUIRED)); SslKeyConfig keyStore = sslConfiguration.keyConfig(); assertThat(keyStore.getDependentFiles(), contains(path)); diff --git a/x-pack/plugin/security/qa/multi-cluster/build.gradle b/x-pack/plugin/security/qa/multi-cluster/build.gradle index 433aae9ec0e10..8588d8e316ba2 100644 --- a/x-pack/plugin/security/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/security/qa/multi-cluster/build.gradle @@ -6,76 +6,88 @@ */ import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-rest-test' 
apply plugin: 'elasticsearch.rest-resources' -configurations { - signedCerts - rootCert -} - -dependencies { - signedCerts project(path: ':x-pack:plugin:core', configuration: 'signedCerts') - rootCert project(path: ':x-pack:plugin:core', configuration: 'rootCert') -} - -tasks.register("copyCerts", Sync) { - dependsOn configurations.signedCerts - from(configurations.signedCerts) - from(configurations.rootCert) - into "${buildDir}/certs" -} - - def fulfillingCluster = testClusters.register('fulfilling-cluster') { requiresFeature 'es.untrusted_remote_cluster_feature_flag_registered', Version.fromString("8.5.0") + setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'remote_cluster.enabled', 'true' setting 'remote_cluster.port', '0' - extraConfigFile 'transport.key', file("${buildDir}/certs/n1.c1.key") - extraConfigFile 'transport.cert', file("${buildDir}/certs/n1.c1.crt") - extraConfigFile 'transport.ca', file("${buildDir}/certs/ca.crt") + extraConfigFile 'transport.key', file("src/test/resources/ssl/transport.key") + extraConfigFile 'transport.cert', file("src/test/resources/ssl/transport.crt") + extraConfigFile 'transport.ca', file("src/test/resources/ssl/transport-ca.crt") - setting 'xpack.security.transport.ssl.enabled', 'true' + // Transport SSL can be enabled or disabled. 
It is independent from the remote cluster server SSL + if (BuildParams.random.nextBoolean()) { + setting 'xpack.security.transport.ssl.enabled', 'true' + } else { + setting 'xpack.security.transport.ssl.enabled', 'false' + } setting 'xpack.security.transport.ssl.key', 'transport.key' setting 'xpack.security.transport.ssl.certificate', 'transport.cert' + setting 'xpack.security.transport.ssl.key_passphrase', 'transport-password' setting 'xpack.security.transport.ssl.certificate_authorities', 'transport.ca' setting 'xpack.security.transport.ssl.client_authentication', 'required' setting 'xpack.security.transport.ssl.verification_mode', 'certificate' - // It is intentionally to use none for both verification_mode and client_authentication. - // Because SSL is not wired up properly on the client (QC) side. These settings - // just test that they can be configured. - // Once SSL is all wired up, we will need (1) proper SSL verification and - // (2) different set of key and cert than the ones used for the transport interface. 
- setting 'xpack.security.remote_cluster.ssl.key', 'transport.key' - setting 'xpack.security.remote_cluster.ssl.certificate', 'transport.cert' - setting 'xpack.security.remote_cluster.ssl.verification_mode', 'none' - setting 'xpack.security.remote_cluster.ssl.client_authentication', 'none' + // Server side SSL configuration for remote cluster + extraConfigFile 'remote-cluster.key', file("src/test/resources/ssl/remote_cluster.key") + extraConfigFile 'remote-cluster.cert', file("src/test/resources/ssl/remote_cluster.crt") + extraConfigFile 'remote-cluster.ca', file("src/test/resources/ssl/remote-cluster-ca.crt") + + setting 'xpack.security.remote_cluster.ssl.enabled', 'true' + setting 'xpack.security.remote_cluster.ssl.key', 'remote-cluster.key' + setting 'xpack.security.remote_cluster.ssl.certificate', 'remote-cluster.cert' + keystore 'xpack.security.remote_cluster.ssl.secure_key_passphrase', 'remote-cluster-password' + // client auth defaults to none for remote_cluster + if (BuildParams.random.nextBoolean()) { + setting 'xpack.security.remote_cluster.ssl.client_authentication', 'none' + } + + user username: "test_user", password: "x-pack-test-password" } def queryingCluster = testClusters.register('querying-cluster') { requiresFeature 'es.untrusted_remote_cluster_feature_flag_registered', Version.fromString("8.5.0") + setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.connections_per_cluster', "1" + // TODO: For now, the client SSL configuration on the query cluster side is shared between + // the default transport and remote_cluster profiles. + // Therefore we cannot configure them separately. This will be separated once we add the support. 
+ extraConfigFile 'transport.key', file("src/test/resources/ssl/remote_cluster.key") + extraConfigFile 'transport.cert', file("src/test/resources/ssl/remote_cluster.crt") + extraConfigFile 'transport.ca', file("src/test/resources/ssl/remote-cluster-ca.crt") + + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.certificate', 'transport.cert' + setting 'xpack.security.transport.ssl.key_passphrase', 'remote-cluster-password' + setting 'xpack.security.transport.ssl.certificate_authorities', 'transport.ca' + setting 'xpack.security.transport.ssl.client_authentication', 'required' + setting 'xpack.security.transport.ssl.verification_mode', 'certificate' + setting 'cluster.remote.my_remote_cluster.mode', 'proxy' setting 'cluster.remote.my_remote_cluster.proxy_address', { - "\"${fulfillingCluster.get().getAllRemoteAccessPortURI() .get(0)}\"" + "\"${fulfillingCluster.get().getAllRemoteAccessPortURI().get(0)}\"" } + + user username: "test_user", password: "x-pack-test-password" } tasks.register('fulfilling-cluster', RestIntegTestTask) { - dependsOn 'copyCerts' useCluster fulfillingCluster systemProperty 'tests.rest.suite', 'fulfilling_cluster' } tasks.register('querying-cluster', RestIntegTestTask) { - dependsOn 'copyCerts' dependsOn 'fulfilling-cluster' useCluster queryingCluster useCluster fulfillingCluster @@ -84,7 +96,6 @@ tasks.register('querying-cluster', RestIntegTestTask) { // runs the fulfilling-cluster cluster tests then the querying-cluster tests tasks.register("integTest") { - dependsOn 'copyCerts' dependsOn 'querying-cluster' } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java b/x-pack/plugin/security/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java index b79fb988270bd..864738080cd7f 100644 --- 
a/x-pack/plugin/security/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java @@ -9,6 +9,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; @@ -29,6 +32,12 @@ protected boolean preserveDataStreamsUponCompletion() { return true; } + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + private boolean isFulfillingCluster() { return "fulfilling_cluster".equals(System.getProperty("tests.rest.suite")); } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/README.asciidoc new file mode 100644 index 0000000000000..04c2ce9bc88dd --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,55 @@ += Keystore Details +This document details the steps used to create the certificate and keystore files in this directory. 
+ +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (8.7.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --ca-dn 'CN=Elastic Auto Transport CA' --out=ca.zip --pass="transport-ca-password" --days=3500 +unzip ca.zip +mv ca/ca.crt ./transport-ca.crt +mv ca/ca.key ./transport-ca.key + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=transport-ca.crt --ca-key=transport-ca.key --ca-pass="transport-ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --ca-dn 'CN=Elastic Auto RemoteCluster CA' --out=ca.zip --pass="remote-cluster-ca-password" --days=3500 +unzip ca.zip +mv ca/ca.crt ./remote-cluster-ca.crt +mv ca/ca.key ./remote-cluster-ca.key + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=remote_cluster 
--out=remote_cluster.zip --pass="remote-cluster-password" --days=3500 \ + --ca-cert=remote-cluster-ca.crt --ca-key=remote-cluster-ca.key --ca-pass="remote-cluster-ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip remote_cluster.zip +mv remote_cluster/remote_cluster.* ./ + +rm remote_cluster.zip +rmdir remote_cluster +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.crt b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.crt new file mode 100644 index 0000000000000..b0683c9c8cf84 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIVAM6zI4OMux23oGuXq3gxYWB40sUJMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMTHUVsYXN0aWMgQXV0byBSZW1vdGVDbHVzdGVyIENBMB4X +DTIzMDEyOTEyMDcxOFoXDTMyMDgyOTEyMDcxOFowKDEmMCQGA1UEAxMdRWxhc3Rp +YyBBdXRvIFJlbW90ZUNsdXN0ZXIgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQC98BMOj4AAVx0IytNIg5r6xY1E3YOKYVpyXo0qLca8JIYLWDPiKpou +IiroziGXtT8d/tw2KNgHlICqq2iKZ+6uWkYKZjnzIw1CQBwwdEJNm04QK4Ti2QI1 +d5RRHjKGUWwHNXZzFokDeDfiqhkWF32E3hKmV2zeCQpLmCULzIUq8G4T2P2KsWI8 +WVscWYjiOd1DcX6pK90OFlJ+nN6tptb1nWvG0cbrHnTqw+rPIvEfz9zQlP3kQjG+ +kECrdMqhTpJ5h5enj/Wr/fzqGqPfK5PHYZO5Y64/AXTZDNiYugEuGpwHZDMOo19v +pIZYQ6CT2pHYpl0elFzEredB9s19878jAgMBAAGjUzBRMB0GA1UdDgQWBBQenD5A +nV/NdQ+nFuQuninbD1ArjzAfBgNVHSMEGDAWgBQenD5AnV/NdQ+nFuQuninbD1Ar +jzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAw5ZbObu/D6n8N +3Q21ohkPmppr1gtHbFqDpXHcVWMRYRO4ktk6mCXtPVnaZi4sONolwlhV5SXJwL0p +1g3yFbXrTlEZ5xs9QdIREUUDW12QKO1WuEYTTNgIu1my6UkQ9x7rlogX4gQYJyon +KVNrpNWWanNY5VjdUxb7DZ5dDDcRgKJQ/63xvtS1rQfMJSdXG1W3YCZjFDmMTN8K 
+af8r5N9X8wEZJBq2hG6n45XyvnbgErgQ2FMdIWeeHDEeM9Nw1u0VzQ/Y/U1j0mch +UltZAcAn9NC8IXGdHS58ZiM6ncWzXlM0qlw1hodv5GLY6pqg2jrW5uTGj43PPx3S +izlLT1CM +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.key b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.key new file mode 100644 index 0000000000000..344c54e135aa1 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote-cluster-ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,DD3FAB8EEE02EB0A2B4F399D5B0569BD + +q4gCQQKkPiXLN6CtzmNK3GNPgPj3qwaC+cKbcJClwI2B7vN6OvwilwOcwWHvcxlb +cH4ivLkyu4zoc5pYBGHDXwDiq8KXvh+9aSjDcvdg/4KU5fLvlRxPBBKCXyasL7vd +xqu9hlcs5KzXnhizn+pN79FWEaTUCLMZmxKkQ/Bzlh1UPFRvFfnmBoSMw6pzIRVQ +KKVwXnKIBN1sgVm9nsIMVSsKRoctIkOueWlgFxOpVR0/YAPmjueqIY2pL+Nkt0Ue +xxJMWj7RABbf8DecJHVRcIFMMJrPwCqkBAhH6t6DjbnVhW0edu1GEmvQN8fp2PY2 +vpuUF9jdpZxqYbxlEvwtqUuiLNt5NDyXcCwmP+k6gc9g4pp+rZ9ZqzS95HADTkUq +YZnBmofBCY7kFVB58VyP1iOiT+wsVPoie4cSZnLZzNYro/v22jjEoIYGnCDl24qy +pBd4radE7qnLZFSk2z5yBh1b/gw1+S/IdkMivb5rWOUQXmE28C4VvUU/Dc+JSX+h +NfJZK7YwKr4n5414ecebQ91FxIA1zbHWk2d4k9kQn1tbRINQP76/TB42BMLuZBYg +xMjx5GAnPASRGmkZObZemapeRNkSfBQuw/Wmzs8hgntzGZvkdyIsH4SIMAoCrieO +OZv9BR33c/aLSPJE0LWnJLSmtaF2KQ2lya2c/sc14c2h+UwU4jo4pcdlSKqIFgEl +2l+XKbYFjJiEQBa+2j9eUXERGXHuwCZkYuP50MiXpN7PUO13NOPNUnOWMwtEm9TX +ddIi2x1De2FZ8FuCgy8cdrX2ybiasce1OC9gxr83Zz8+2D5q2d4eFfmGj2PIolzN +HnC+SPbShUfQiGLs8PSlWo2y2XKlA/S53o5ZBO4NNQlGNvjwIH1sBpEogPWVkIgH +tW9i9LWaNturH0EzYG4T53ItMQtvXRLFTASwYZfcNgi7r5TGUfpa7oVpdK2bmz+T +XLKwNU7HeKD9W39NQt8yS8Y2iEKFCT/pUyMxRNki6toPXt/7nX+Wx6/SB9938NaT +0pD3lC/zIXkDx8/tSg+K0C/+d4Tzjflwu/7rrDsE8cE+MAWN/03uUMjPmgMltfvO +n+fczxmjOYqo53K9RnMYy54JSMb6tbgxTwl0Zr/Dr+8Qo9u37+3zAlCZWqJhMdKs +Wbokpi4qr5TNEdkgmcA5pBOdU79Gii2kyuoKtZCvuGhWMFTliiUmt0y08HiKfUay +Uj6X+N59rufUseLSSDgphMkiPGzwc/octyTdxsQIhu+vDh63TARWdDiMau5KGuqh 
+MwnJfyphJ/jOkX1U1fSJNesRTOpiQD0HwkoI/RV/CvBc0ehheXneFH84pXCY/qen +f3Y1aO+gaC9oonyh6ZQylj6pnC1bXE6V88GagpIM7moODdHJedOFT8AKUJUUX0vq +7rcgAo4lScllamoHetbgIt5OIfGqdDtO0K6sKpTDXU1Wa6tIYPHkbigrLPqgbIf7 +ica+Wvw1x4WTwkuiOAps/KvHFQyL3X08HYf4r69/yx7yVXoidd+9U8vxRyJzMkI0 +DJRDUcLvq4id08a6nPMo3cfj7RoDOQtTw7uZTNvCBWKCy6vGlRW0UXb7G8IZTba6 +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.crt b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.crt new file mode 100644 index 0000000000000..604ac9a1c29a4 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIVAIjtnAa2buuAJoEm0ngGn6XfziRVMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMTHUVsYXN0aWMgQXV0byBSZW1vdGVDbHVzdGVyIENBMB4X +DTIzMDEyOTEyMDgzN1oXDTMyMDgyOTEyMDgzN1owGTEXMBUGA1UEAwwOcmVtb3Rl +X2NsdXN0ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCus8b7ZOw7 +2wTGq7DkaHpHdSs77t4hGig93xyR8+/ega3zpn7bzBU9MD8L0NqLCEmAIAzc7vzj +3UG8KJiBFA+1a9O3Xpk/iYFyGxaBXaC/dbBeZC8nNuaXLlRby6lWlJ0uUQ4uhp1T +4zzSNFO7cb8OjzkHnq8saPyuh51I1X7S/ND1CEXP/DV2MUmfFOYCxwN2SnZVKSuR +hZrq2sYkbAiHq1XVvRwxQJ/BQDh+hfmd9Fqx9Q9uzA0Ck3/wfixIn2bci3Hcl90G +sgRbtnJc/oi//bAkxi5sZdJ6Blq6EJYPtLQpX+FgmkeGPWsJKInrV/j3xTunr0Uw +xGR0p2MrwbM7AgMBAAGjgeAwgd0wHQYDVR0OBBYEFARRmEVNytzppHf98wEcc0XB +5zz3MB8GA1UdIwQYMBaAFB6cPkCdX811D6cW5C6eKdsPUCuPMIGPBgNVHREEgYcw +gYSCCWxvY2FsaG9zdIIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaHBH8AAAGHEAAA +AAAAAAAAAAAAAAAAAAGCCmxvY2FsaG9zdDSCCmxvY2FsaG9zdDaCFWxvY2FsaG9z +dC5sb2NhbGRvbWFpboIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjQwCQYDVR0TBAIw +ADANBgkqhkiG9w0BAQsFAAOCAQEARQjnYcx+494l9G/r92IkBPVtuvPBF7+rjXZA +jVyagFXm4h+oNyn+9VL629UO/1sNGlWGL03B3CC4uEXKElkdJKFUL9Rim6UV9ofj +ytNu43i/vp3j6VDBznxBZ4Jc/5nLmCb1prekZ78OG3rf2iub8L8HSUkq6ypC39c7 +gDNwAdYwNl8DbvHzHL/ukUzaLqBjD1AodlDi/p7/LQyhzCQCbWLWorzzQS65old4 +Ku6Fyu5yBAWvme9W5Gjk4n6FJWpTRCnAQDB5usLOoN07wKC5taP6NFmZQ5aI9Ht5 
+BbwIk+vPkx1aWENdj7btmwbrBRfVryXLR4xY1KmAlJa4Crg/VA== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.key b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.key new file mode 100644 index 0000000000000..47eed51430e68 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/remote_cluster.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,A85335FAC92C3F890E39FCF60C5FD295 + +vJivcDLeMyb7q8G9iqBiH6KnW8NUFBf3TpRw301Vsv6FTHZi1H65eA8x8VVf/pPO +IIfdW9QIzBXmPAx0Zj9Lj6BpwflDswrRkcRCbyrv6khXC9GZjTsGrKBdvKahca3L +DlR1mmOjPSXlm+VIMOx6FsWhuLOYihui253CcJMxcWsuQLJu6juz6Gij7B2cuCBm +NaW6EXj0c9ZxM1FD3TFxp4hawEmD4uKEckjkEfV3i+cGa+yAkjMIx1D90gyqkRl1 +Jrh8+VFkRm9OfGNKvvYkX9UAc9/pt37LweE/ECtrBLGYm+WbLD6QmdIXPWzJSpVS +MFsljPHk35XDOYke9qEdQlmS1Q8SOAAEGyS9eUroAgPXbK7BhgSkR+v52Qjpo/ud +YH1QrZv+YQzY2ew1wVShJsMgiUjxBn05+4BwXXWJ1xkvQ4cZuTcUrkKEXLn1kjuh +Nv1aCtOI8awz7Xvhd8Dp7FCjBFwFKMW+J5pVbbTdI+JuWEkh/p9RxdtDrn8IHcGE +9r84Y0LDCjz3nmXlhHVC57XVSZdofAK0V/AbtkLeqmTnvv7/I9kA9155Uvmx0OWR +tHnCmKnMVR3d0zxbpIzSNjdsGqA4n5van5DyKVH3eF+ip6tMNs5BrPRBX3XO2tV1 +7/icQCYDoyFBEcevAalytSMoh7/ZFiGX3K8WGps6/aP3nfPKEHHqPrOPQJbrmsAX +VtEJE8clO83P0HnoGQlopBYRJrr5sFs1KNipcSejyUdB7sbnrH5HqRIGWSc8+kPc +9f2HS0vm4G94oOSTe14blWsGq7nagJYWRFnK8FBIwggA3tNN3YfUzAo3fIZOasX6 +IEs1WmoPfGK7VUsaN0uWEVi5S49QlvrG804/CWWfUKeXOHGcqG2UQ5QMHW72jqzV +qk8+1g6UTYQybXWz7Xfwb1yKFMx2H6b+yUfTUsOZb88hjvNlhZvXkWsqd78Gb2XK +IfEpWA/3f0YVP1TpsbBCnCpVTnncx0yo/jPIodUuiPgKOiaI+/X/7Jfv4ylXVxsS +aN+AnNPZKgO7SvvEdKpoDvJ1tzsa4lY3z3h1KU8i4PFTS1Rh1gdwJTC1+nVnL8O/ +KjIqT1FD5LU1XX2UXJviQkXhSVMPM3n1luC0vIdwp/v04yY+SzQL0Vb7CfoEZ5bp +vbAkMlz0/Cq0eS3zgeJTt8zqMGyr7Z7vADqAlYjEYD9l7aMxdlKzN0UVJuApwuWA +6VHLGY6WbAHVaqhd+1UOKQAlgdJ9C7knWTIZMtXdDsIEllFbVMnc3lYg1QcGZqcp +ejRNYfzkK8HLLVFRHH15KyLqvhJc8LE6kjYZ7snwHRFfw61OQJ2d/HIrp6abmmrt 
+N/NDFB3TyMtK6sS+tosSXeCErc6iUVVXcn/EtK7JXf9N0RlNCDJ1GldfS+avXXw9 +84peAvhSt+0gOfKAfG2IDu1KnNvcz67qXH0a6OkvMXUevqW/VVeEq/3+wMkOCUeo +3Zzo1+Est2fpCLsyOfLEQsKeRFDtYYsT6iFOYPslnRSzzPsL1I0TJUrCifDM/2eN +gg4ydl3a8Ixme64smmywc3Ptf57lFPhN4q/83lTNl63QtiE87GZtMT68ru81/x6i +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.crt b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.crt new file mode 100644 index 0000000000000..d5b219f6de4ab --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDKTCCAhGgAwIBAgIURWCLvSo2ZzpMSlw9yblrKk+FCIIwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZRWxhc3RpYyBBdXRvIFRyYW5zcG9ydCBDQTAeFw0yMzAx +MjkxMjAzMzFaFw0zMjA4MjkxMjAzMzFaMCQxIjAgBgNVBAMTGUVsYXN0aWMgQXV0 +byBUcmFuc3BvcnQgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD5 +zm51wptQfKbbFmdTcbpk+tGT8/MXMkNpKdtxUQgrPu6urCxTs6UkjsYhw10Tw8qa +r7+txjDDZPZOIAFA9rFHotp+/dGvsEuBjT084KIS48rQiLhJzD4q+U+aborIuUwb +iLjQOIflP5olOmFWXjZUoRUaYelzmZp4/laVzvRC2l+Gj5+422S1EQEidr8EZZyU +9NHz2kGOz9ObVw68aBXZgWZwE4ctumvyAprBF8Y3w836edL3EjVcMHOjg2FXjMDh +y7H+I/RcKDUw0AHHdvuRui8E4+kBVCiErnKqNQ1YOqDAXdMDnB4ZT052LflJkVEF +u8y/Jjs9OHtxyHjMsY+PAgMBAAGjUzBRMB0GA1UdDgQWBBRTknQaF13CHJCK2nyw +j6/ohmCkHDAfBgNVHSMEGDAWgBRTknQaF13CHJCK2nywj6/ohmCkHDAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCCsKTXLjs/g48iRe7hDzln6GXr +wp3uuwGs0E5LjEgqQld+D1jALRPuCiStYL8wpKINy2VKCB2bbtEb3lGBog1ns5Pg +ntVu5z54wq76f+kFe+NGt54TCEELkxccvkhTPm7yDTmYOOgUwYXJ/sqSjNABNmAu +1wFl3B48iWrEd0QkSHjutEaRuODaoApjdpfaLUUMC6C6C0WYMtK2qE4DklwyIDSH +/JZQsmLNRVrNNvmwphVhiXMwehnPPGpMa3x1qTZWBtgOoiWnwt0qNCPoTcImZr2h +8rDuVFNOjGk9zwPz3mv7x1H2rqF57/1ItBPmARzR6TkIollRicODPo4WXqVC +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.key b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.key new 
file mode 100644 index 0000000000000..29ca330770ad2 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport-ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,5985A282ABF260D0DEB6BCE172A06F98 + +f1P2c6P2PHhdpNYS2LZjfFN4xCY8Wf0zFcShcyK8ssIfEm09xjXCVKQxmyGNb9bQ +VO3widBXK/r/l3hqWo3bdouxY4itGloBhhdJGaBN4A+raMv+0kttMdGrgaWkk/ek +/ljCzsj6lTfWPO7Hk8YOkDxadD6mVNSz/TxSSdED8rM9xsuYhQMcOhSa5sXwdf6w +JImlSO0JKbK1iButZ0m4Os/xvr1D2f6XGjX5OqIl4I/iDt6INbciT8jlY6C7YTGo +dyjr5aFVT5fc+6HHjqi2NdMIn9jOPjct0+hgGVC3180hJDJcfBmcDbMYPf5kMqIx +xJkB0B9QTXG9sbc4w4tqG9JDwzM3H/nzMyNeFDaHdJO9DqnE7SNU3GmzrtkoU3hK +1hgcrLDJhHNLlN6D/ApgjDythjxDLsuR9Cr6CLmcxzVxv/al0qY3/PLpJA+KQjr9 ++4zAxSHQBmu/aBZTqlNth2A8NzVVTRz8F9iEMsllytAzT6C24yXAgoM55Ja8LNSZ +r55PWqC2wLETvmaoBnbr3qcX9xbeeGnhYQbFOGJfTgILhhkkjjB77FTGlXJMtjOg +wX5BlmoWYdkUsqHiCAtIv3I8bS2pJZQt6O8fPvn+9MSgpAyn5xaPlGB1tYn4SRFd +4OAv6KJwQ65jVw7miPOWIYeBm3iBrcZxtzHrylVM1SdcbGz2+A7CQcUu0KMkdSBS +ybNNz2UYQG66a6TOKGXI9lkCBQl9DHiS+hcv80q8IgEE8h8PNGhyddZ99iRlQMDK +3IoO04bUdWZ9JQOgGa3PXhhYlyZ5W0lKrpsvfp8jl/adTixSJ5gw6d7tzXnCrMuj +5YrArkpO+rJXEDXE4IpgQ1zNQUkBCdZu6adqIwVDXAlAMSEZw0/z/mwB1+5BogBK +YPWDtKKN77K36WUmQddm9q1qfBhsxj33OwQlHoYISk0VGmFMs0u/jgIvH2qJwhE8 +wcP7MgZUvr0VT1ezM9x8pTJmmGhY7d1AbC+1qb4uNAk+lWp9Y5y/thz9qFDDb3PY +ppKPHM/VG+Uq6rXAki/LJjfi0B3diWOAdrcznbmQGQ8LkH9uc9juBgQik7gRNrN8 +mYZ9E1itBMifGXuKuPtU2qYTz3RFcI5pPgMVnJQkO8lJqGF8D4qbeKk/u2wUj1/c +i+7EkFAXFSjv9kjVFVsbyhZJia7TLE2QXyMq2OiP2Gwhcu/QxB7kYpdCivmY7q7e +8jTWt08vn9xaT044015Qoc+3w0ixcI+WkEQ+72MDJOX0akfl4xpA9cAfHYs68qkJ +fB7ynFOS2i6/BXvulo9RXb9tIXixLJqDL9pCztCLh/zegMfT0vUbDjjO7Z2edEXk +oJgL5QpshqCxoFq3Rc18NHBRIUa7QpzhT72FtIlgavY7P3bxjtERB7Yku1gEs0/5 +n+8bP4bRGBGgmXqfNMFCrBd3K3ifil/LgHnOEq8qrQ+j/pvGorG0dI3OTcZ+dmYC +w4E7WzSesZhE4xx9xU51tqGLgU7+BUdc9t85zahLo/tXjZSQHsgEqVC24//fqPX1 +kOeEPdqXlMhwq0UlPOsuhfK9fHdFuAFiqh62xH8aPKGi9nnWB/d/JtFsdSugyyzb +-----END RSA PRIVATE KEY----- diff --git 
a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.crt new file mode 100644 index 0000000000000..c76e45e70d1fd --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDpzCCAo+gAwIBAgIUBJisi0qK38LiU3acHwcS9B6/SykwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZRWxhc3RpYyBBdXRvIFRyYW5zcG9ydCBDQTAeFw0yMzAx +MjkxMjA1NTNaFw0zMjA4MjkxMjA1NTNaMBQxEjAQBgNVBAMTCXRyYW5zcG9ydDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKL79IsmRLzmDBhAsjny/Ho +6hP4WekmTXyAvEpCo/h3JqGsCxT2C/qazff67jj57F/OvH2RyGQMHVcHhiZwrs7d +Alo8297UENJfh3YWq3GLxSlP7L5U5+D4sGCHqRQhIVIw1D7FzeskU30trjVp68Di +4SdDLqNrfKw6pmbaWwU0zEh4puM5YeP4exEhfc13v2Yt8JeJMvBV3aUqjwcif57z +suJq/XxGJL9iUZdBVUjc0R2MX52UQllg6jkLoExqkBmZjVE+3LbyRmR5f5kMKY8M +5hvxfKIA/Eycq7tstpM57H/cFG2N38Fbu3VOPo5lm8jsJ0RtMdVv9as1cBVbZ2EC +AwEAAaOB4DCB3TAdBgNVHQ4EFgQUIn0c4Ani9eLCCm128j9PZNsnzb0wHwYDVR0j +BBgwFoAUU5J0GhddwhyQitp8sI+v6IZgpBwwgY8GA1UdEQSBhzCBhIIJbG9jYWxo +b3N0ghdsb2NhbGhvc3Q2LmxvY2FsZG9tYWluNocEfwAAAYcQAAAAAAAAAAAAAAAA +AAAAAYIKbG9jYWxob3N0NIIKbG9jYWxob3N0NoIVbG9jYWxob3N0LmxvY2FsZG9t +YWlughdsb2NhbGhvc3Q0LmxvY2FsZG9tYWluNDAJBgNVHRMEAjAAMA0GCSqGSIb3 +DQEBCwUAA4IBAQBzm07DRNr9Oyg8k6gZ6UvMRo9Elbp+A/B22l3YJHPQnj0dxA6P +iEnQ+TdqwCefowMdakndWJ7nEUBjOg9OLnJnrhd3ibkix2dshUnBChhDimt30JVN +HvC2IvBOZvUlH3MMWTt93FGMEqKjASA+eCfWxLbcvOI8f6Vx5VXnM1NQlmfYiqHD +vGXCIHvPmNBq0UxXU550Pp3sxd9Y2OnnzEAZ1W02UJAICQcTS2fkUzYdEexjIjkY +EX3n5354+tjGSIxPWth4hb19Fn0+1v3DPCIHxfzDUeqh+voOm8+PXWJCqxcOLFPQ +POG10udYr1g2WusMiaNHHrfDZ/o03I3Cz7M2 +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.key new file mode 100644 index 0000000000000..4c85df651b290 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/test/resources/ssl/transport.key 
@@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,FF3713FD7246D42335C7FFF4FCACB10C + +KTez5b5wnuCGPIs/pmGyIs58B9PiBsm6SgVNT7UoIy+FdNstN+VZY/KU9qLestRa +CuA+pJrTSqO26XxqpJb4yXn6S+ZhdkVcNl9HqT6MgqUfXPHLLufvwV55ktDHCKxV +Taj09/RXYYJ1CfvwMem8jjBayI2/OPHUM77dkBmqlGp+eC6NwDxkfKxX/vxunrL1 +Skn+WEgbWUm0jM4JNtxpc5KwGzpQdnmggTEbN3pwF6yE8K9iPQ06Dme6GNTkrBRm +HQUcMm5ZWJBCFfYz91CmrrLaCu0Q8kKNoE7wieUhy8r5JJjtCVMebf0Yu2epbJOi +oOCkDqgFbb+g3yaelGAFloKPAoZf0liBpe4vFWhFnfcyY4k0eVUxuaXhCnjtKkll +DKTLC36vgFx1qQc5TaxWtzhsk/ccNYoDcrQySJ9isuuv1J9Nc2ltQ2EgdPekvl21 +tIas4bzbi3o8KQxMIPWJ5CpgXm72bVJ1GJe7C70pGGLOsSk9nTe0sN9o7B6+5KiD +OrwoiKY8bvr27KFc0oGBXWl0XFEuXn1uxnWc2KIyLSQDvqo7Jdur0gxgtIZAK4J7 +9DwJFwK0Db174JebSMtVLo17VDDV2DbVkg0PKiyP7bgAUCBr75AvqJz7Yok+KIxV +hDoBgD5RAfmyKJ0DwJJmBM7qgqF5yNNOrceyxRTIGQUDXhbQDPIKGaLz1PUIY1qJ +bycTkFgxOOSHVPlAhZari4+XF8wth37sPxJPic2MTQtKjt02WFg5Mq59VggkcFTp +Y0eelUcV6dWWHNbtDR/HKI9jrBTjRunJwJiZ3Bw+nnvkVKp19Lfkp8iCgHuNMbp/ +4e9AQjBXy+ududxIBzvkEWtlWeyX/AdzHwFU9FdYGOlYKzK4kmpscG9BfIMp83zP +wNg04iaZdict3cjKwUZKhUyXtb5v8gng60zAFOpGTBFRHr8qYWMPzxIhkxttz72M +DNv1kZJMRfphq5zmNL28S2VYrIE600s7daH50iebfx48pUvTrjjiGPL83Y2RKa1w +ctBYfiF09NFSYXa+rhRgXhZwDrQJ1g2+KYSSAjhCftmZA1eycMmuPppv3sAI+8DX +NEQL7f+jxy+asQwPA7ApujvtWkO4kUBxM8wolu3RA4tZejwB1wcVVqnCbTbwE9qn +DeLztHx7EuxbS2+eCkfhxYJlxJsqRM9DBqBmLranq5w/44SknkdDfZIcG6Z/M7Yv +Vd9nE2+7vo8QrTA/Iy83PDZBsHUxFoYBKFaHbEoDdyypoxsLURqbdBqUp/JC/Lmf +wc5uZcbgtcWqMriazZSsjSPm+PxnYQZWZres7bnppmCDTjZ/mP1Gy/f99JiIa/Pw +TnotriAFM5No5vo8pIZlNRR8+HzTrAevdMypMZdbv8TsfP93fd3TJvpwR/cvw9Nc +GCQQtz1OeuoJCkuZev00Q9FgsmZofKFCgw03nC+y+QHF5mD/wvVkL83H4YOfmQRe +xdwzRvQq8P7Bl9my4QqbTfCIW1ftsbu+4M0aRv/iv21FdQhMVAICfRwC3aRgf9rI +qoeDcHZSSR/ElXFni/Bvr+4Fd+E/V8pPmM/l2KRWN+GHFL+qLjJi3GFCS4qMhiHh +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurations.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurations.java index 52956306bcfa8..bdf7c165b55a6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurations.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurations.java @@ -8,25 +8,109 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLService; +import java.util.HashMap; import java.util.Map; import java.util.Set; import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED; import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; +import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_SSL_ENABLED; import static org.elasticsearch.xpack.core.security.SecurityField.setting; +/** + * Settings for a transport profile usually begin with "transport.profiles.NAME." + * The settings can be either of the two categories: + * 1. Networking - e.g. `transport.profiles.NAME.tcp.keep_alive: true` + * 2. SSL - e.g. `transport.profiles.NAME.xpack.security.ssl.client_authentication: none` + * This class is responsible for building SSL configuration for transport profiles. + * + * Among the transport profiles, two of them are special: "default" and "_remote_cluster". + * + * The "default" profile has dedicated settings for both networking (e.g. `transport.tcp.keep_alive`) + * and SSL (e.g. `xpack.security.transport.ssl.client_authentication`). + * It also accepts networking settings specified with its transport profile name, + * e.g. `transport.profiles.default.tcp.keep_alive` is valid configuration. 
+ * But it does *not* allow SSL settings to be specified with its transport profile name, + * e.g. `transport.profiles.default.xpack.security.ssl.client_authentication` is NOT valid configuration. + * + * The "_remote_cluster" profile also has dedicated settings for both networking (e.g. `remote_cluster.tcp.keep_alive`) + * and SSL (e.g. `xpack.security.remote_cluster.ssl.client_authentication`). + * This profile is completely synthetic in that it does NOT accept either networking or SSL settings + * with its transport profile name. + * NOTE the "_remote_cluster" profile name is special ONLY when the remote cluster port is enabled. + * If the remote cluster port is not enabled, this profile name will be treated just as a normal profile (for BWC). + * + * When building SSL configurations for the transport profiles, assuming SSL is enabled, + * this class builds a map that contains a configuration for each of the configured transport profiles + * (keyed by its name). + * The map also contains an entry that has the special key "default" and value being the SSL + * configuration for the "default" profile. + * If remote cluster is enabled, the map will also contain an entry that has the special key + * "_remote_cluster" with the value being the SSL configuration of the synthetic "_remote_cluster" profile. + * + * NOTE the "_remote_cluster" profile only applies to the new remote cluster model. + * The legacy remote cluster model mostly just uses the "default" transport profile. + */ public final class ProfileConfigurations { private ProfileConfigurations() {} - public static Map get(Settings settings, SSLService sslService, SslConfiguration defaultConfiguration) { + /** + * Builds SSL configuration for transport profiles. + * + * @param settings Settings of the ES node + * @param sslService For resolving the SSL configuration based on its prefix + * @param sslEnabledOnly If true, only include the SSL configuration if SSL is enabled for the profile. 
+ * If false, SSL configuration is included for a profile regardless whether SSL is actually enabled for it. + * @return A map that contains {@link SslConfiguration} for each named transport profile as well + * as an entry for the "default" profile. If the remote_cluster feature is enabled, it also + * contains an entry for the synthetic "_remote_cluster" profile. + */ + public static Map get(Settings settings, SSLService sslService, boolean sslEnabledOnly) { + final boolean transportSslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + final boolean remoteClusterPortEnabled = REMOTE_CLUSTER_PORT_ENABLED.get(settings); + final boolean remoteClusterSslEnabled = remoteClusterPortEnabled && REMOTE_CLUSTER_SSL_ENABLED.get(settings); + + final Map profileConfigurations = new HashMap<>(); + + if (sslEnabledOnly) { + if (transportSslEnabled == false && remoteClusterSslEnabled == false) { + return profileConfigurations; + } else if (transportSslEnabled == false) { + // The single TRANSPORT_SSL_ENABLED setting determines whether SSL is enabled for both + // the default transport profile and any custom transport profiles. That is, SSL is + // always either enabled or disabled together for default and custom transport profiles. + profileConfigurations.put(REMOTE_CLUSTER_PROFILE, sslService.getSSLConfiguration(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); + return profileConfigurations; + } else if (remoteClusterSslEnabled == false) { + populateFromTransportProfiles(settings, sslService, profileConfigurations); + return profileConfigurations; + } + } + + // At this point, either SSL is enabled for both transport and remote cluster, or sslEnabledOnly is false. 
+ // In both case, we need to include all configurations + populateFromTransportProfiles(settings, sslService, profileConfigurations); + if (remoteClusterPortEnabled) { + assert profileConfigurations.containsKey(REMOTE_CLUSTER_PROFILE) == false; + profileConfigurations.put(REMOTE_CLUSTER_PROFILE, sslService.getSSLConfiguration(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); + } + + return profileConfigurations; + } + + private static void populateFromTransportProfiles( + Settings settings, + SSLService sslService, + Map profileConfigurations + ) { + final SslConfiguration defaultConfiguration = sslService.getSSLConfiguration(setting("transport.ssl.")); + Set profileNames = settings.getGroups("transport.profiles.", true).keySet(); - Map profileConfiguration = Maps.newMapWithExpectedSize(profileNames.size() + 1); for (String profileName : profileNames) { if (profileName.equals(TransportSettings.DEFAULT_PROFILE)) { // don't attempt to parse ssl settings from the profile; @@ -44,15 +128,10 @@ public static Map get(Settings settings, SSLService ss } SslConfiguration configuration = sslService.getSSLConfiguration("transport.profiles." + profileName + "." 
+ setting("ssl")); - profileConfiguration.put(profileName, configuration); + profileConfigurations.put(profileName, configuration); } - assert profileConfiguration.containsKey(TransportSettings.DEFAULT_PROFILE) == false; - profileConfiguration.put(TransportSettings.DEFAULT_PROFILE, defaultConfiguration); - if (REMOTE_CLUSTER_PORT_ENABLED.get(settings)) { - assert profileConfiguration.containsKey(REMOTE_CLUSTER_PROFILE) == false; - profileConfiguration.put(REMOTE_CLUSTER_PROFILE, sslService.getSSLConfiguration(XPackSettings.REMOTE_CLUSTER_SSL_PREFIX)); - } - return profileConfiguration; + assert profileConfigurations.containsKey(TransportSettings.DEFAULT_PROFILE) == false; + profileConfigurations.put(TransportSettings.DEFAULT_PROFILE, defaultConfiguration); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index 6263568e14e61..c7cf4ddb91df1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -27,6 +27,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.transport.netty4.Netty4Transport; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.xpack.core.XPackSettings; @@ -44,7 +45,9 @@ import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED; +import 
static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; +import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_SSL_ENABLED; /** * Implementation of a transport that extends the {@link Netty4Transport} to add SSL and IP Filtering @@ -56,7 +59,8 @@ public class SecurityNetty4Transport extends Netty4Transport { private final SSLService sslService; private final SslConfiguration sslConfiguration; private final Map profileConfiguration; - private final boolean sslEnabled; + private final boolean transportSslEnabled; + private final boolean remoteClusterSslEnabled; public SecurityNetty4Transport( final Settings settings, @@ -81,15 +85,11 @@ public SecurityNetty4Transport( ); this.exceptionHandler = new SecurityTransportExceptionHandler(logger, lifecycle, (c, e) -> super.onException(c, e)); this.sslService = sslService; - this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); - if (sslEnabled) { - this.sslConfiguration = sslService.getSSLConfiguration(setting("transport.ssl.")); - Map profileConfiguration = ProfileConfigurations.get(settings, sslService, sslConfiguration); - this.profileConfiguration = Collections.unmodifiableMap(profileConfiguration); - } else { - this.profileConfiguration = Collections.emptyMap(); - this.sslConfiguration = null; - } + this.transportSslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + this.remoteClusterSslEnabled = REMOTE_CLUSTER_PORT_ENABLED.get(settings) && REMOTE_CLUSTER_SSL_ENABLED.get(settings); + + this.profileConfiguration = Collections.unmodifiableMap(ProfileConfigurations.get(settings, sslService, true)); + this.sslConfiguration = this.profileConfiguration.get(TransportSettings.DEFAULT_PROFILE); } @Override @@ -99,7 +99,13 @@ protected void doStart() { @Override public final ChannelHandler getServerChannelInitializer(String name) { - if (sslEnabled) { + if (remoteClusterSslEnabled && REMOTE_CLUSTER_PROFILE.equals(name)) { + final SslConfiguration 
remoteClusterSslConfiguration = profileConfiguration.get(name); + if (remoteClusterSslConfiguration == null) { + throw new IllegalStateException("remote cluster SSL is enabled but no configuration is found"); + } + return getSslChannelInitializer(name, remoteClusterSslConfiguration); + } else if (transportSslEnabled) { SslConfiguration configuration = profileConfiguration.get(name); if (configuration == null) { throw new IllegalStateException("unknown profile: " + name); @@ -149,7 +155,7 @@ protected ServerChannelInitializer getSslChannelInitializer(final String name, f @Override public boolean isSecure() { - return this.sslEnabled; + return this.transportSslEnabled; } private class SecurityClientChannelInitializer extends ClientChannelInitializer { @@ -158,7 +164,7 @@ private class SecurityClientChannelInitializer extends ClientChannelInitializer private final SNIHostName serverName; SecurityClientChannelInitializer(DiscoveryNode node) { - this.hostnameVerificationEnabled = sslEnabled && sslConfiguration.verificationMode().isHostnameVerificationEnabled(); + this.hostnameVerificationEnabled = transportSslEnabled && sslConfiguration.verificationMode().isHostnameVerificationEnabled(); String configuredServerName = node.getAttributes().get("server_name"); if (configuredServerName != null) { try { @@ -174,7 +180,7 @@ private class SecurityClientChannelInitializer extends ClientChannelInitializer @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - if (sslEnabled) { + if (transportSslEnabled) { ch.pipeline() .addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled, serverName)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java index f6a02be8ebd18..09068c50b0162 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityUsageTransportAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; @@ -44,6 +45,7 @@ import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.FIPS_MODE_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; +import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_SSL_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.TOKEN_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.TRANSPORT_SSL_ENABLED; @@ -188,6 +190,9 @@ static Map sslUsage(Settings settings) { Map map = Maps.newMapWithExpectedSize(2); map.put("http", singletonMap("enabled", HTTP_SSL_ENABLED.get(settings))); map.put("transport", singletonMap("enabled", TRANSPORT_SSL_ENABLED.get(settings))); + if (RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED.get(settings)) { + map.put("remote_cluster", singletonMap("enabled", REMOTE_CLUSTER_SSL_ENABLED.get(settings))); + } return map; } else { return Collections.emptyMap(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index a76af88a37bb7..4d2d311536616 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -61,7 +61,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED; +import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; public class SecurityServerTransportInterceptor implements TransportInterceptor { @@ -256,6 +257,11 @@ public void sendRequestInner( } } + // Package private for testing + Map getProfileFilters() { + return profileFilters; + } + private AsyncSender interceptForRemoteAccessRequests(final AsyncSender sender) { return new AsyncSender() { @Override @@ -434,17 +440,25 @@ public TransportRequestHandler interceptHandler( } private Map initializeProfileFilters(DestructiveOperations destructiveOperations) { - final SslConfiguration sslConfiguration = sslService.getSSLConfiguration(setting("transport.ssl")); - final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, sslConfiguration); + final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, false); Map profileFilters = Maps.newMapWithExpectedSize(profileConfigurations.size() + 1); final boolean transportSSLEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + final boolean remoteClusterPortEnabled = REMOTE_CLUSTER_PORT_ENABLED.get(settings); + final boolean remoteClusterSSLEnabled = remoteClusterPortEnabled && XPackSettings.REMOTE_CLUSTER_SSL_ENABLED.get(settings); + for (Map.Entry entry : profileConfigurations.entrySet()) { final SslConfiguration profileConfiguration = entry.getValue(); - final boolean extractClientCert = transportSSLEnabled && 
SSLService.isSSLClientAuthEnabled(profileConfiguration); + final String profileName = entry.getKey(); + final boolean extractClientCert; + if (remoteClusterPortEnabled && REMOTE_CLUSTER_PROFILE.equals(profileName)) { + extractClientCert = remoteClusterSSLEnabled && SSLService.isSSLClientAuthEnabled(profileConfiguration); + } else { + extractClientCert = transportSSLEnabled && SSLService.isSSLClientAuthEnabled(profileConfiguration); + } profileFilters.put( - entry.getKey(), + profileName, new ServerTransportFilter( authcService, authzService, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java index c34c9d5f0d1ee..54cb20b6c0e0e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -117,4 +117,9 @@ requests from all the nodes are attached with a user (either a serialize } }, listener::onFailure)); } + + // Package private for testing + boolean isExtractClientCert() { + return extractClientCert; + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurationsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurationsTests.java index 8f9789941f448..f0702ab5c364b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurationsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/transport/ProfileConfigurationsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.ssl.SslVerificationMode; import org.elasticsearch.env.Environment; @@ -18,14 +19,19 @@ import org.hamcrest.Matchers; import java.nio.file.Path; +import java.util.Arrays; import java.util.Map; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; + public class ProfileConfigurationsTests extends ESTestCase { public void testGetSecureTransportProfileConfigurations() { assumeFalse("Can't run in a FIPS JVM, uses JKS/PKCS12 keystores", inFipsJvm()); final Settings settings = getBaseSettings().put("path.home", createTempDir()) - .put("xpack.security.transport.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) .put("xpack.security.transport.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) .put("transport.profiles.full.xpack.security.ssl.verification_mode", SslVerificationMode.FULL.name()) .put("transport.profiles.cert.xpack.security.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) @@ -33,7 +39,7 @@ public void testGetSecureTransportProfileConfigurations() { final Environment env = TestEnvironment.newEnvironment(settings); SSLService sslService = new SSLService(env); final SslConfiguration defaultConfig = sslService.getSSLConfiguration("xpack.security.transport.ssl"); - final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, defaultConfig); + final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, true); assertThat(profileConfigurations.size(), Matchers.equalTo(3)); assertThat(profileConfigurations.keySet(), Matchers.containsInAnyOrder("full", "cert", "default")); assertThat(profileConfigurations.get("full").verificationMode(), Matchers.equalTo(SslVerificationMode.FULL)); @@ -50,25 +56,134 @@ public 
void testGetInsecureTransportProfileConfigurations() { final Environment env = TestEnvironment.newEnvironment(settings); SSLService sslService = new SSLService(env); final SslConfiguration defaultConfig = sslService.getSSLConfiguration("xpack.security.transport.ssl"); - final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, defaultConfig); + final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, true); assertThat(profileConfigurations.size(), Matchers.equalTo(2)); assertThat(profileConfigurations.keySet(), Matchers.containsInAnyOrder("none", "default")); assertThat(profileConfigurations.get("none").verificationMode(), Matchers.equalTo(SslVerificationMode.NONE)); assertThat(profileConfigurations.get("default"), Matchers.sameInstance(defaultConfig)); } + public void testTransportAndRemoteClusterSslCanBeEnabledIndependently() { + assumeFalse("Can't run in a FIPS JVM with JKS/PKCS12 keystore or verification mode None", inFipsJvm()); + final boolean transportSslEnabled = randomBoolean(); + final boolean remoteClusterSslEnabled = randomBoolean(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.transport.ssl.enabled", transportSslEnabled) + .put("xpack.security.transport.ssl.keystore.path", getKeystorePath().toString()) + .put("xpack.security.transport.ssl.verification_mode", SslVerificationMode.NONE.name()) + .put("remote_cluster.enabled", true) + .put("xpack.security.remote_cluster.ssl.enabled", remoteClusterSslEnabled) + .put("xpack.security.remote_cluster.ssl.keystore.path", getKeystorePath().toString()) + .put("xpack.security.remote_cluster.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) + .setSecureSettings(getKeystoreSecureSettings("transport", "remote_cluster")) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + SSLService sslService = new SSLService(env); + final Map profileConfigurations = 
ProfileConfigurations.get(settings, sslService, true); + + if (transportSslEnabled && remoteClusterSslEnabled) { + assertThat(profileConfigurations.size(), Matchers.equalTo(2)); + assertThat(profileConfigurations.keySet(), Matchers.containsInAnyOrder("default", "_remote_cluster")); + assertThat(profileConfigurations.get("_remote_cluster").verificationMode(), Matchers.equalTo(SslVerificationMode.CERTIFICATE)); + assertThat( + profileConfigurations.get("default"), + Matchers.sameInstance(sslService.getSSLConfiguration("xpack.security.transport.ssl")) + ); + } else if (transportSslEnabled) { + assertThat(profileConfigurations.size(), Matchers.equalTo(1)); + assertThat(profileConfigurations.keySet(), contains("default")); + assertThat( + profileConfigurations.get("default"), + Matchers.sameInstance(sslService.getSSLConfiguration("xpack.security.transport.ssl")) + ); + } else if (remoteClusterSslEnabled) { + assertThat(profileConfigurations.size(), Matchers.equalTo(1)); + assertThat(profileConfigurations.keySet(), contains("_remote_cluster")); + assertThat(profileConfigurations.get("_remote_cluster").verificationMode(), Matchers.equalTo(SslVerificationMode.CERTIFICATE)); + } else { + assertThat(profileConfigurations, anEmptyMap()); + } + } + + public void testNoProfileConfigurationForRemoteClusterIfFeatureIsDisabled() { + assumeFalse("Can't run in a FIPS JVM with JKS/PKCS12 keystore or verification mode None", inFipsJvm()); + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.security.transport.ssl.keystore.path", getKeystorePath().toString()) + .put("xpack.security.transport.ssl.verification_mode", SslVerificationMode.NONE.name()) + .put("remote_cluster.enabled", false) + .put("xpack.security.remote_cluster.ssl.enabled", true) + .put("xpack.security.remote_cluster.ssl.keystore.path", getKeystorePath().toString()) + 
.put("xpack.security.remote_cluster.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) + .setSecureSettings(getKeystoreSecureSettings("transport", "remote_cluster")) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + SSLService sslService = new SSLService(env); + final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, true); + assertThat(profileConfigurations.size(), Matchers.equalTo(1)); + assertThat(profileConfigurations.keySet(), contains("default")); + assertThat( + profileConfigurations.get("default"), + Matchers.sameInstance(sslService.getSSLConfiguration("xpack.security.transport.ssl")) + ); + } + + public void testGetProfileConfigurationsIrrespectiveToSslEnabled() { + assumeFalse("Can't run in a FIPS JVM with JKS/PKCS12 keystore or verification mode None", inFipsJvm()); + final boolean remoteClusterPortEnabled = randomBoolean(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.transport.ssl.enabled", false) + .put("xpack.security.transport.ssl.keystore.path", getKeystorePath().toString()) + .put("transport.profiles.client.xpack.security.ssl.client_authentication", SslClientAuthenticationMode.NONE) + .put("remote_cluster.enabled", remoteClusterPortEnabled) + .put("xpack.security.transport.ssl.verification_mode", SslVerificationMode.NONE.name()) + .put("xpack.security.remote_cluster.ssl.enabled", false) + .put("xpack.security.remote_cluster.ssl.keystore.path", getKeystorePath().toString()) + .put("xpack.security.remote_cluster.ssl.verification_mode", SslVerificationMode.CERTIFICATE.name()) + .setSecureSettings(getKeystoreSecureSettings("transport", "remote_cluster")) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + SSLService sslService = new SSLService(env); + final Map profileConfigurations = ProfileConfigurations.get(settings, sslService, false); + if (remoteClusterPortEnabled) { + 
assertThat(profileConfigurations.size(), Matchers.equalTo(3)); + assertThat(profileConfigurations.keySet(), containsInAnyOrder("default", "client", "_remote_cluster")); + assertThat(profileConfigurations.get("_remote_cluster").verificationMode(), is(SslVerificationMode.CERTIFICATE)); + } else { + assertThat(profileConfigurations.size(), Matchers.equalTo(2)); + assertThat(profileConfigurations.keySet(), containsInAnyOrder("default", "client")); + } + assertThat( + profileConfigurations.get("default"), + Matchers.sameInstance(sslService.getSSLConfiguration("xpack.security.transport.ssl")) + ); + assertThat(profileConfigurations.get("client").clientAuth(), is(SslClientAuthenticationMode.NONE)); + } + private Settings.Builder getBaseSettings() { + return Settings.builder() + .setSecureSettings(getKeystoreSecureSettings("transport")) + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.security.transport.ssl.keystore.path", getKeystorePath().toString()); + } + + private Path getKeystorePath() { final Path keystore = randomBoolean() ? getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks") : getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12"); + return keystore; + } - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.security.transport.ssl.keystore.secure_password", "testnode"); - - return Settings.builder() - .setSecureSettings(secureSettings) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.keystore.path", keystore.toString()); + private static MockSecureSettings getKeystoreSecureSettings(String... sslContexts) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + Arrays.stream(sslContexts) + .forEach( + sslContext -> { secureSettings.setString("xpack.security." 
+ sslContext + ".ssl.keystore.secure_password", "testnode"); } + ); + return secureSettings; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java index f4786523165d1..704cb1b43b8fc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java @@ -113,6 +113,11 @@ public void testUsage() throws Exception { final boolean transportSSLEnabled = randomBoolean(); settings.put("xpack.security.transport.ssl.enabled", transportSSLEnabled); + final boolean remoteClusterPortEnabled = randomBoolean(); + settings.put("remote_cluster.enabled", remoteClusterPortEnabled); + final boolean remoteClusterSslEnabled = randomBoolean(); + settings.put("xpack.security.remote_cluster.ssl.enabled", remoteClusterSslEnabled); + boolean configureEnabledFlagForTokenService = randomBoolean(); final boolean tokenServiceEnabled; if (configureEnabledFlagForTokenService) { @@ -265,6 +270,11 @@ public void testUsage() throws Exception { } else { assertThat(source.getValue("ssl.http.enabled"), is(httpSSLEnabled)); assertThat(source.getValue("ssl.transport.enabled"), is(transportSSLEnabled)); + if (remoteClusterPortEnabled) { + assertThat(source.getValue("ssl.remote_cluster.enabled"), is(remoteClusterSslEnabled)); + } else { + assertThat(source.getValue("ssl.remote_cluster.enabled"), nullValue()); + } } assertThat(source.getValue("realms"), is(nullValue())); assertThat(source.getValue("token_service"), is(nullValue())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 89cd644a45842..3cd048a3f1f9e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -16,6 +16,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.ssl.SslClientAuthenticationMode; +import org.elasticsearch.common.ssl.SslConfiguration; +import org.elasticsearch.common.ssl.SslKeyConfig; +import org.elasticsearch.common.ssl.SslTrustConfig; +import org.elasticsearch.common.ssl.SslVerificationMode; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.Task; @@ -60,6 +65,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -76,6 +82,8 @@ import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor.REMOTE_ACCESS_ACTION_ALLOWLIST; import static org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor.REMOTE_ACCESS_CLUSTER_CREDENTIAL_HEADER_KEY; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -833,6 +841,104 @@ public TransportResponse read(StreamInput in) { 
assertThat(securityContext.getThreadContext().getHeader(REMOTE_ACCESS_CLUSTER_CREDENTIAL_HEADER_KEY), nullValue()); } + public void testProfileFiltersCreatedDifferentlyForDifferentTransportAndRemoteClusterSslSettings() { + // filters are created irrespective of ssl enabled + final boolean transportSslEnabled = randomBoolean(); + final boolean remoteClusterSslEnabled = randomBoolean(); + final Settings.Builder builder = Settings.builder() + .put(this.settings) + .put("xpack.security.transport.ssl.enabled", transportSslEnabled) + .put("remote_cluster.enabled", true) + .put("xpack.security.remote_cluster.ssl.enabled", remoteClusterSslEnabled); + final SSLService sslService = mock(SSLService.class); + + when(sslService.getSSLConfiguration("xpack.security.transport.ssl.")).thenReturn( + new SslConfiguration( + "xpack.security.transport.ssl", + randomBoolean(), + mock(SslTrustConfig.class), + mock(SslKeyConfig.class), + randomFrom(SslVerificationMode.values()), + SslClientAuthenticationMode.REQUIRED, + List.of("TLS_AES_256_GCM_SHA384"), + List.of("TLSv1.3") + ) + ); + + when(sslService.getSSLConfiguration("xpack.security.remote_cluster.ssl.")).thenReturn( + new SslConfiguration( + "xpack.security.remote_cluster.ssl", + randomBoolean(), + mock(SslTrustConfig.class), + mock(SslKeyConfig.class), + randomFrom(SslVerificationMode.values()), + SslClientAuthenticationMode.NONE, + List.of("TLS_RSA_WITH_AES_256_GCM_SHA384"), + List.of("TLSv1.2") + ) + ); + + final var securityServerTransportInterceptor = new SecurityServerTransportInterceptor( + builder.build(), + threadPool, + mock(AuthenticationService.class), + mock(AuthorizationService.class), + sslService, + securityContext, + new DestructiveOperations( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) + ), + new RemoteClusterAuthorizationResolver(settings, clusterService.getClusterSettings()) + ); + + final Map profileFilters = 
securityServerTransportInterceptor.getProfileFilters(); + assertThat(profileFilters.keySet(), containsInAnyOrder("default", "_remote_cluster")); + assertThat(profileFilters.get("default").isExtractClientCert(), is(transportSslEnabled)); + assertThat(profileFilters.get("_remote_cluster").isExtractClientCert(), is(false)); + } + + public void testNoProfileFilterForRemoteClusterWhenTheFeatureIsDisabled() { + final boolean transportSslEnabled = randomBoolean(); + final Settings.Builder builder = Settings.builder() + .put(this.settings) + .put("xpack.security.transport.ssl.enabled", transportSslEnabled) + .put("remote_cluster.enabled", false) + .put("xpack.security.remote_cluster.ssl.enabled", randomBoolean()); + final SSLService sslService = mock(SSLService.class); + + when(sslService.getSSLConfiguration("xpack.security.transport.ssl.")).thenReturn( + new SslConfiguration( + "xpack.security.transport.ssl", + randomBoolean(), + mock(SslTrustConfig.class), + mock(SslKeyConfig.class), + randomFrom(SslVerificationMode.values()), + SslClientAuthenticationMode.REQUIRED, + List.of("TLS_AES_256_GCM_SHA384"), + List.of("TLSv1.3") + ) + ); + + final var securityServerTransportInterceptor = new SecurityServerTransportInterceptor( + builder.build(), + threadPool, + mock(AuthenticationService.class), + mock(AuthorizationService.class), + sslService, + securityContext, + new DestructiveOperations( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) + ), + new RemoteClusterAuthorizationResolver(settings, clusterService.getClusterSettings()) + ); + + final Map profileFilters = securityServerTransportInterceptor.getProfileFilters(); + assertThat(profileFilters.keySet(), contains("default")); + assertThat(profileFilters.get("default").isExtractClientCert(), is(transportSslEnabled)); + } + private Tuple randomAllowlistedActionAndRequest() { final String action = randomFrom(REMOTE_ACCESS_ACTION_ALLOWLIST.toArray(new 
String[0])); return new Tuple<>(action, mock(TransportRequest.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageFileTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageFileTests.java index d414dac19df4a..beb28a95dd67e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageFileTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLErrorMessageFileTests.java @@ -206,6 +206,54 @@ public void testMessageForHttpsNotEnabledButTruststoreConfigured() throws Except ); } + public void testMessageForRemoteClusterSslEnabledWithoutKeys() { + final String prefix = "xpack.security.remote_cluster.ssl"; + final Settings.Builder builder = Settings.builder().put("remote_cluster.enabled", true); + // remote cluster ssl is enabled by default + if (randomBoolean()) { + builder.put(prefix + ".enabled", true); + } + if (inFipsJvm()) { + configureWorkingTrustedAuthorities(prefix, builder); + } else { + configureWorkingTruststore(prefix, builder); + } + + final Throwable exception = expectFailure(builder); + assertThat( + exception, + throwableWithMessage( + "invalid SSL configuration for " + + prefix + + " - server ssl configuration requires a key and certificate, but these have not been configured;" + + " you must set either [" + + prefix + + ".keystore.path], or both [" + + prefix + + ".key] and [" + + prefix + + ".certificate]" + ) + ); + assertThat(exception, instanceOf(ElasticsearchException.class)); + } + + public void testNoErrorIfRemoteClusterOrSslDisabledWithoutKeys() { + final String prefix = "xpack.security.remote_cluster.ssl"; + final Settings.Builder builder = Settings.builder().put(prefix + ".enabled", false); + if (randomBoolean()) { + builder.put("remote_cluster.enabled", true); + } else { + builder.put("remote_cluster.enabled", false); + } + if (inFipsJvm()) { + configureWorkingTrustedAuthorities(prefix, builder); 
+ } else { + configureWorkingTruststore(prefix, builder); + } + expectSuccess(builder); + } + private void checkMissingKeyManagerResource(String fileType, String configKey, @Nullable Settings.Builder additionalSettings) { checkMissingResource(fileType, configKey, (prefix, builder) -> buildKeyConfigSettings(additionalSettings, prefix, builder)); } diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/LegacyRemoteClusterSecuritySmokeIT.java similarity index 97% rename from x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java rename to x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/LegacyRemoteClusterSecuritySmokeIT.java index c29442f1bb0a8..f070b497251ae 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecuritySmokeIT.java +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/src/test/java/org/elasticsearch/xpack/remotecluster/LegacyRemoteClusterSecuritySmokeIT.java @@ -20,7 +20,7 @@ /** * This test suite will be run twice: Once against the fulfilling cluster, then again against the querying cluster. 
*/ -public class RemoteClusterSecuritySmokeIT extends ESRestTestCase { +public class LegacyRemoteClusterSecuritySmokeIT extends ESRestTestCase { private static final String USER = "test_user"; private static final SecureString PASS = new SecureString("x-pack-test-password".toCharArray()); From 0429fac33454fdefc074742a18be29a76313366a Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 2 Feb 2023 08:00:52 +0100 Subject: [PATCH 44/63] Enforce synthetic source for time series indices (#93380) Support for synthetic source is also added to `unsigned_long` field as part of this change. This is required because `unsigned_long` field types can be used in tsdb indices and this change would prohibit the usage of these field type otherwise. Closes #92319 --- .../rest-api-spec/test/tsdb/20_mapping.yml | 121 ++++++++++++++++++ .../test/tsdb/80_index_resize.yml | 91 ------------- .../search/fieldcaps/FieldCapabilitiesIT.java | 11 +- .../org/elasticsearch/index/IndexMode.java | 16 +++ .../index/mapper/DocumentMapper.java | 3 +- .../index/mapper/SourceFieldMapper.java | 48 +++++-- .../index/mapper/DocumentParserTests.java | 8 +- .../query/SearchExecutionContextTests.java | 2 +- .../unsignedlong/UnsignedLongFieldMapper.java | 27 ++++ .../UnsignedLongFieldMapperTests.java | 67 +++++++++- .../rest-api-spec/test/70_time_series.yml | 26 ++++ .../rest-api-spec/test/rollup/10_basic.yml | 70 +++++----- .../DownsampleActionSingleNodeTests.java | 2 +- 13 files changed, 349 insertions(+), 143 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index a5f0155f950cd..a1c1c87ee687c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -413,3 +413,124 @@ nested fields: rx: type: long time_series_metric: gauge + +--- +regular 
source: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/time series indices only support synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + mode: stored + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true +--- +disabled source: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/time series indices only support synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + mode: disabled + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + +--- +source include/exclude: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/filtering the stored _source is incompatible with synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + includes: [a] + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + + - do: + catch: '/filtering the stored _source is incompatible with synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + excludes: [b] + properties: + "@timestamp": + type: date + k8s: + 
properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index 03872f79a037a..f78f16780cb4f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -148,94 +148,3 @@ clone: - match: {hits.total.value: 1} - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} - ---- -clone no source index: - - skip: - version: " - 8.1.99" - reason: tsdb indexing changed in 8.2.0 - - - do: - indices.create: - index: test_no_source - body: - settings: - index: - mode: time_series - routing_path: [ metricset, k8s.pod.uid ] - time_series: - start_time: 2021-04-28T00:00:00Z - end_time: 2021-04-29T00:00:00Z - number_of_shards: 1 - number_of_replicas: 0 - mappings: - _source: - enabled: false - properties: - "@timestamp": - type: date - metricset: - type: keyword - time_series_dimension: true - k8s: - properties: - pod: - properties: - uid: - type: keyword - time_series_dimension: true - name: - type: keyword - ip: - type: ip - network: - properties: - tx: - type: long - rx: - type: long - - - do: - bulk: - refresh: true - index: test_no_source - body: - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": 
"pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - - - do: - indices.put_settings: - index: test_no_source - body: - index.blocks.write: true - - - do: - indices.clone: - index: test_no_source - target: test_no_source_clone - - - do: - search: - index: test_no_source_clone - body: - docvalue_fields: - - field: _tsid - query: - query_string: - query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 7646778bd8188..c492dfe60b6e4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -10,6 +10,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; @@ -37,6 +38,7 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.mapper.StringStoredFieldFieldLoader; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -118,6 +120,7 @@ public void setUp() throws Exception { .endObject() .startObject("playlist") .field("type", "text") + .field("store", true) .endObject() .startObject("some_dimension") .field("type", "keyword") @@ -845,7 +848,13 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - throw new UnsupportedOperationException(); + return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + @Override + protected void write(XContentBuilder b, Object value) throws IOException { + BytesRef ref = (BytesRef) value; + b.utf8Value(ref.bytes, ref.offset, ref.length); + } + }; } private static final TypeParser PARSER = new FixedTypeParser(c -> new TestMetadataMapper()); diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 71a824535392e..b46b8334408fb 100644 
--- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; @@ -110,6 +111,9 @@ public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { public boolean shouldValidateTimestamp() { return false; } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) {} }, TIME_SERIES("time_series") { @Override @@ -196,6 +200,13 @@ public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { public boolean shouldValidateTimestamp() { return true; } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) { + if (sourceFieldMapper.isSynthetic() == false) { + throw new IllegalArgumentException("time series indices only support synthetic source"); + } + } }; protected static String tsdbMode() { @@ -310,6 +321,11 @@ public String getName() { */ public abstract boolean shouldValidateTimestamp(); + /** + * Validates the source field mapper + */ + public abstract void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper); + /** * Parse a string into an {@link IndexMode}. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 938212aac04f3..ea52660c40b10 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -93,13 +93,12 @@ public void validate(IndexSettings settings, boolean checkLimits) { } } + settings.getMode().validateMapping(mappingLookup); /* * Build an empty source loader to validate that the mapping is compatible * with the source loading strategy declared on the source field mapper. */ sourceMapper().newSourceLoader(mapping()); - - settings.getMode().validateMapping(mappingLookup); if (settings.getIndexSortConfig().hasIndexSort() && mappers().nestedLookup() != NestedLookup.EMPTY) { throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 7ba540804190c..f8b92625ef5fa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.lookup.Source; @@ -48,7 +49,16 @@ private enum Mode { null, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, - Strings.EMPTY_ARRAY + Strings.EMPTY_ARRAY, + null + ); + + private static final SourceFieldMapper TSDB_DEFAULT = new SourceFieldMapper( + Mode.SYNTHETIC, + Explicit.IMPLICIT_TRUE, + Strings.EMPTY_ARRAY, + 
Strings.EMPTY_ARRAY, + IndexMode.TIME_SERIES ); public static class Defaults { @@ -79,7 +89,7 @@ public static class Builder extends MetadataFieldMapper.Builder { private final Parameter mode = new Parameter<>( "mode", true, - () -> null, + () -> getIndexMode() == IndexMode.TIME_SERIES ? Mode.SYNTHETIC : null, (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)), m -> toType(m).enabled.explicit() ? null : toType(m).mode, (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), @@ -97,8 +107,11 @@ public static class Builder extends MetadataFieldMapper.Builder { m -> Arrays.asList(toType(m).excludes) ); - public Builder() { + private final IndexMode indexMode; + + public Builder(IndexMode indexMode) { super(Defaults.NAME); + this.indexMode = indexMode; } public Builder setSynthetic() { @@ -127,18 +140,30 @@ public SourceFieldMapper build() { throw new MapperParsingException("Cannot set both [mode] and [enabled] parameters"); } if (isDefault()) { - return DEFAULT; + return indexMode == IndexMode.TIME_SERIES ? TSDB_DEFAULT : DEFAULT; } - return new SourceFieldMapper( + SourceFieldMapper sourceFieldMapper = new SourceFieldMapper( mode.get(), enabled.get(), includes.getValue().toArray(String[]::new), - excludes.getValue().toArray(String[]::new) + excludes.getValue().toArray(String[]::new), + indexMode ); + if (indexMode != null) { + indexMode.validateSourceFieldMapper(sourceFieldMapper); + } + return sourceFieldMapper; + } + + private IndexMode getIndexMode() { + return indexMode; } } - public static final TypeParser PARSER = new ConfigurableTypeParser(c -> DEFAULT, c -> new Builder()); + public static final TypeParser PARSER = new ConfigurableTypeParser( + c -> c.getIndexSettings().getMode() == IndexMode.TIME_SERIES ? 
TSDB_DEFAULT : DEFAULT, + c -> new Builder(c.getIndexSettings().getMode()) + ); static final class SourceFieldType extends MappedFieldType { @@ -178,7 +203,9 @@ public Query termQuery(Object value, SearchExecutionContext context) { private final String[] excludes; private final SourceFilter sourceFilter; - private SourceFieldMapper(Mode mode, Explicit enabled, String[] includes, String[] excludes) { + private final IndexMode indexMode; + + private SourceFieldMapper(Mode mode, Explicit enabled, String[] includes, String[] excludes, IndexMode indexMode) { super(new SourceFieldType((enabled.explicit() && enabled.value()) || (enabled.explicit() == false && mode != Mode.DISABLED))); assert enabled.explicit() == false || mode == null; this.mode = mode; @@ -190,6 +217,7 @@ private SourceFieldMapper(Mode mode, Explicit enabled, String[] include throw new IllegalArgumentException("filtering the stored _source is incompatible with synthetic source"); } this.complete = stored() && sourceFilter == null; + this.indexMode = indexMode; } private static SourceFilter buildSourceFilter(String[] includes, String[] excludes) { @@ -259,13 +287,13 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder().init(this); + return new Builder(indexMode).init(this); } /** * Build something to load source {@code _source}. 
*/ - public SourceLoader newSourceLoader(Mapping mapping) { + public SourceLoader newSourceLoader(Mapping mapping) { if (mode == Mode.SYNTHETIC) { return new SourceLoader.Synthetic(mapping); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 3e0aa13ad9a0d..546029039ca5d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2571,7 +2571,13 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - throw new UnsupportedOperationException(); + return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + @Override + protected void write(XContentBuilder b, Object value) throws IOException { + BytesRef ref = (BytesRef) value; + b.utf8Value(ref.bytes, ref.offset, ref.length); + } + }; } private static final TypeParser PARSER = new FixedTypeParser(c -> new MockMetadataMapper()); diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 902927654b9db..2c42c0efff82e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -398,7 +398,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceScriptLoading() throws IOException { // Build a mapping using synthetic source - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder().setSynthetic().build(); + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null).setSynthetic().build(); RootObjectMapper root = new RootObjectMapper.Builder("_doc", 
Explicit.IMPLICIT_TRUE).build(MapperBuilderContext.root(true)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); MappingLookup lookup = MappingLookup.fromMapping(mapping); diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index 69835202b4ef7..3db75f6e723d0 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -31,6 +31,8 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.SimpleMappedFieldType; +import org.elasticsearch.index.mapper.SortedNumericDocValuesSyntheticFieldLoader; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.TimeSeriesParams; @@ -707,4 +709,29 @@ public void doValidate(MappingLookup lookup) { ); } } + + @Override + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + if (hasDocValues == false) { + throw new IllegalArgumentException( + "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + ); + } + if (ignoreMalformed.value()) { + throw new IllegalArgumentException( + "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it ignores malformed numbers" + ); + } + if (copyTo.copyToFields().isEmpty() != true) { + throw new IllegalArgumentException( + "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it 
declares copy_to" + ); + } + return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed()) { + @Override + protected void writeValue(XContentBuilder b, long value) throws IOException { + b.value(DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value)); + } + }; + } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java index 39e4ab7d0f550..5dce9feaf0284 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -32,6 +33,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.matchesPattern; public class UnsignedLongFieldMapperTests extends MapperTestCase { @@ -352,11 +354,74 @@ private Number randomNumericValue() { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - throw new AssumptionViolatedException("not supported"); + assumeFalse("unsigned_long doesn't support ignore_malformed with synthetic _source", ignoreMalformed); + return new NumberSyntheticSourceSupport(); } @Override protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + + final class NumberSyntheticSourceSupport 
implements SyntheticSourceSupport { + private final BigInteger nullValue = usually() ? null : BigInteger.valueOf(randomNonNegativeLong()); + + @Override + public SyntheticSourceExample example(int maxVals) { + if (randomBoolean()) { + Tuple v = generateValue(); + return new SyntheticSourceExample(v.v1(), v.v2(), this::mapping); + } + List> values = randomList(1, maxVals, this::generateValue); + List in = values.stream().map(Tuple::v1).toList(); + List outList = values.stream().map(Tuple::v2).sorted().toList(); + Object out = outList.size() == 1 ? outList.get(0) : outList; + return new SyntheticSourceExample(in, out, this::mapping); + } + + private Tuple generateValue() { + if (nullValue != null && randomBoolean()) { + return Tuple.tuple(null, nullValue); + } + long n = randomNonNegativeLong(); + BigInteger b = BigInteger.valueOf(n); + if (b.signum() < 0) { + b = b.add(BigInteger.ONE.shiftLeft(64)); + } + return Tuple.tuple(n, b); + } + + private void mapping(XContentBuilder b) throws IOException { + minimalMapping(b); + if (nullValue != null) { + b.field("null_value", nullValue); + } + if (rarely()) { + b.field("index", false); + } + if (rarely()) { + b.field("store", false); + } + } + + @Override + public List invalidExample() { + return List.of( + new SyntheticSourceInvalidExample( + matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it doesn't have doc values"), + b -> { + minimalMapping(b); + b.field("doc_values", false); + } + ), + new SyntheticSourceInvalidExample( + matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it ignores malformed numbers"), + b -> { + minimalMapping(b); + b.field("ignore_malformed", true); + } + ) + ); + } + } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/70_time_series.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/70_time_series.yml index 0742558f03e4a..150c90faf175a 
100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/70_time_series.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/70_time_series.yml @@ -71,6 +71,32 @@ fetch the _tsid: - match: {hits.hits.1.fields.metricset: [aa]} - match: {hits.hits.1.fields.ul: [9223372036854775808]} +--- +fetch the _source: + - skip: + version: " - 8.6.99" + reason: synthetic source support to unsigned long added in 8.7.0 + + - do: + search: + index: test + body: + fields: [_tsid, metricset, ul] + query: + query_string: + query: '+@timestamp:"2021-04-28T18:35:24.467Z" +metricset:aa' + sort: [ _tsid ] + + - match: {hits.total.value: 2} + - match: {hits.hits.0._source.voltage: 7.2} + - match: {hits.hits.0._source.metricset: aa} + - match: {hits.hits.0._source.@timestamp: 2021-04-28T18:35:24.467Z} + - match: {hits.hits.0._source.ul: 9223372036854775807} + - match: {hits.hits.1._source.voltage: 3.2} + - match: {hits.hits.1._source.metricset: aa} + - match: {hits.hits.1._source.@timestamp: 2021-04-28T18:35:24.467Z} + - match: {hits.hits.1._source.ul: 9223372036854775808} + --- # Sort order is of unsigned_long fields is not the one we would expect. 
# This is caused by the encoding of unsigned_long as a signed long before diff --git a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml index 5c6d9f4d83a17..812a14fa6d552 100644 --- a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml +++ b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml @@ -90,7 +90,7 @@ setup: "Downsample index": - skip: version: " - 8.4.99" - reason: "rollup renamed to downsample in 8.5.0" + reason: "Downsampling GA-ed in 8.7.0" - do: indices.downsample: @@ -110,23 +110,23 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 21 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 90 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 200 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 726 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 2001818691 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 2005177954 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } - - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.26" } - - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:35:00.000Z" } - - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 2 } - - match: { hits.hits.0._source.k8s\.pod\.tags: ["backend", "prod", "us-west1"] } - - match: { 
hits.hits.0._source.k8s\.pod\.values: [1, 1, 3] } - - is_true: hits.hits.0._source.k8s\.pod\.running + - match: { hits.hits.0._source.k8s.pod.multi-counter: 21 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 90 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 200 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 726 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s.pod.network.tx.min: 2001818691 } + - match: { hits.hits.0._source.k8s.pod.network.tx.max: 2005177954 } + - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } + - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.26" } + - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:35:00.000Z" } + - match: { hits.hits.0._source.k8s.pod.number_of_containers: 2 } + - match: { hits.hits.0._source.k8s.pod.tags: ["backend", "prod", "us-west1"] } + - match: { hits.hits.0._source.k8s.pod.values: [1, 1, 3] } + - is_true: hits.hits.0._source.k8s.pod.running # Assert rollup index settings - do: @@ -345,8 +345,8 @@ setup: --- "Downsample a downsampled index": - skip: - version: " - 8.4.99" - reason: "Rollup of rollups introduced in 8.5.0" + version: " - 8.6.99" + reason: "Rollup GA-ed in 8.7.0" - do: indices.downsample: @@ -398,29 +398,29 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 21 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 90 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 200 } - - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 726 } - - match: { 
hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 2001818691 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 2005177954 } - - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } - - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.26" } - - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:35:00.000Z" } - - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 2 } - - match: { hits.hits.0._source.k8s\.pod\.tags: [ "backend", "prod", "us-west1" ] } - - match: { hits.hits.0._source.k8s\.pod\.values: [ 1, 1, 3 ] } - - - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s.pod.multi-counter: 21 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 90 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 200 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 726 } + - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s.pod.network.tx.min: 2001818691 } + - match: { hits.hits.0._source.k8s.pod.network.tx.max: 2005177954 } + - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } + - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.26" } + - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:35:00.000Z" } + - match: { hits.hits.0._source.k8s.pod.number_of_containers: 2 } + - match: { hits.hits.0._source.k8s.pod.tags: [ "backend", "prod", "us-west1" ] } + - match: { hits.hits.0._source.k8s.pod.values: [ 1, 1, 3 ] } + + - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T20:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { 
hits.hits.2._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.2._source._doc_count: 4 } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 5a34a95f74a2e..d27bd1a29d3ba 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -214,7 +214,7 @@ public void setup() throws IOException { mapping.startObject(FIELD_LABEL_DOUBLE).field("type", "double").endObject(); mapping.startObject(FIELD_LABEL_INTEGER).field("type", "integer").endObject(); mapping.startObject(FIELD_LABEL_KEYWORD).field("type", "keyword").endObject(); - mapping.startObject(FIELD_LABEL_TEXT).field("type", "text").endObject(); + mapping.startObject(FIELD_LABEL_TEXT).field("type", "text").field("store", "true").endObject(); mapping.startObject(FIELD_LABEL_BOOLEAN).field("type", "boolean").endObject(); mapping.startObject(FIELD_LABEL_IPv4_ADDRESS).field("type", "ip").endObject(); mapping.startObject(FIELD_LABEL_IPv6_ADDRESS).field("type", "ip").endObject(); From 7c3e20184044510a514b2eb719853c82420753c4 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 2 Feb 2023 09:25:00 +0100 Subject: [PATCH 45/63] [ML] rename frequent_items to frequent_item_sets and make it GA (#93421) rename frequent_items to frequent_item_sets and remove the experimental batch --- docs/changelog/93421.yaml | 10 ++ docs/reference/aggregations/bucket.asciidoc | 2 +- ...> frequent-item-sets-aggregation.asciidoc} | 168 +++++++++--------- .../xpack/ml/MachineLearning.java | 2 +- .../frequentitemsets/EclatMapReducer.java | 1 + 
.../FrequentItemSetsAggregationBuilder.java | 5 +- ...equentItemSetsAggregationBuilderTests.java | 4 +- ...ems_agg.yml => frequent_item_sets_agg.yml} | 92 +++++++--- 8 files changed, 166 insertions(+), 118 deletions(-) create mode 100644 docs/changelog/93421.yaml rename docs/reference/aggregations/bucket/{frequent-items-aggregation.asciidoc => frequent-item-sets-aggregation.asciidoc} (86%) rename x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/{frequent_items_agg.yml => frequent_item_sets_agg.yml} (88%) diff --git a/docs/changelog/93421.yaml b/docs/changelog/93421.yaml new file mode 100644 index 0000000000000..20cb13522f52c --- /dev/null +++ b/docs/changelog/93421.yaml @@ -0,0 +1,10 @@ +pr: 93421 +summary: Make `frequent_item_sets` aggregation GA +area: Machine Learning +type: feature +issues: [] +highlight: + title: Make `frequent_item_sets` aggregation GA + body: The `frequent_item_sets` aggregation has been moved from + technical preview to general availability. + notable: true diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index a52eb15c9a151..4391d73ebd46c 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -36,7 +36,7 @@ include::bucket/filter-aggregation.asciidoc[] include::bucket/filters-aggregation.asciidoc[] -include::bucket/frequent-items-aggregation.asciidoc[] +include::bucket/frequent-item-sets-aggregation.asciidoc[] include::bucket/geodistance-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc b/docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc similarity index 86% rename from docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc rename to docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc index f52a31e98f1d5..01dacd0d6ccd5 100644 --- 
a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc @@ -1,40 +1,38 @@ -[[search-aggregations-bucket-frequent-items-aggregation]] -=== Frequent items aggregation +[[search-aggregations-bucket-frequent-item-sets-aggregation]] +=== Frequent item sets aggregation ++++ -Frequent items +Frequent item sets ++++ -experimental::[] - -A bucket aggregation which finds frequent item sets. It is a form of association -rules mining that identifies items that often occur together. Items that are -frequently purchased together or log events that tend to co-occur are examples -of frequent item sets. Finding frequent item sets helps to discover +A bucket aggregation which finds frequent item sets. It is a form of association +rules mining that identifies items that often occur together. Items that are +frequently purchased together or log events that tend to co-occur are examples +of frequent item sets. Finding frequent item sets helps to discover relationships between different data points (items). -The aggregation reports closed item sets. A frequent item set is called closed -if no superset exists with the same ratio of documents (also known as its -<>). For example, we have the two +The aggregation reports closed item sets. A frequent item set is called closed +if no superset exists with the same ratio of documents (also known as its +<>). For example, we have the two following candidates for a frequent item set, which have the same support value: 1. `apple, orange, banana` 2. `apple, orange, banana, tomato`. -Only the second item set (`apple, orange, banana, tomato`) is returned, and the -first set – which is a subset of the second one – is skipped. Both item sets +Only the second item set (`apple, orange, banana, tomato`) is returned, and the +first set – which is a subset of the second one – is skipped. Both item sets might be returned if their support values are different. 
-The runtime of the aggregation depends on the data and the provided parameters. -It might take a significant time for the aggregation to complete. For this -reason, it is recommended to use <> to run your +The runtime of the aggregation depends on the data and the provided parameters. +It might take a significant time for the aggregation to complete. For this +reason, it is recommended to use <> to run your requests asynchronously. ==== Syntax -A `frequent_items` aggregation looks like this in isolation: +A `frequent_item_sets` aggregation looks like this in isolation: [source,js] -------------------------------------------------- -"frequent_items": { +"frequent_item_sets": { "minimum_set_size": 3, "fields": [ {"field": "my_field_1"}, @@ -44,74 +42,74 @@ A `frequent_items` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE -.`frequent_items` Parameters +.`frequent_item_sets` Parameters |=== |Parameter Name |Description |Required |Default Value |`fields` |(array) Fields to analyze. | Required | -|`minimum_set_size` | (integer) The <> of one item set. | Optional | `1` -|`minimum_support` | (integer) The <> of one item set. | Optional | `0.1` +|`minimum_set_size` | (integer) The <> of one item set. | Optional | `1` +|`minimum_support` | (integer) The <> of one item set. | Optional | `0.1` |`size` | (integer) The number of top item sets to return. | Optional | `10` |`filter` | (object) Query that filters documents from the analysis | Optional | `match_all` |=== [discrete] -[[frequent-items-fields]] +[[frequent-item-sets-fields]] ==== Fields -Supported field types for the analyzed fields are keyword, numeric, ip, date, -and arrays of these types. You can also add runtime fields to your analyzed +Supported field types for the analyzed fields are keyword, numeric, ip, date, +and arrays of these types. You can also add runtime fields to your analyzed fields. 
-If the combined cardinality of the analyzed fields are high, the aggregation +If the combined cardinality of the analyzed fields are high, the aggregation might require a significant amount of system resources. -You can filter the values for each field by using the `include` and `exclude` -parameters. The parameters can be regular expression strings or arrays of -strings of exact terms. The filtered values are removed from the analysis and -therefore reduce the runtime. If both `include` and `exclude` are defined, -`exclude` takes precedence; it means `include` is evaluated first and then +You can filter the values for each field by using the `include` and `exclude` +parameters. The parameters can be regular expression strings or arrays of +strings of exact terms. The filtered values are removed from the analysis and +therefore reduce the runtime. If both `include` and `exclude` are defined, +`exclude` takes precedence; it means `include` is evaluated first and then `exclude`. [discrete] -[[frequent-items-minimum-set-size]] +[[frequent-item-sets-minimum-set-size]] ==== Minimum set size -The minimum set size is the minimum number of items the set needs to contain. A -value of 1 returns the frequency of single items. Only item sets that contain at -least the number of `minimum_set_size` items are returned. For example, the item -set `orange, banana, apple` is returned only if the minimum set size is 3 or +The minimum set size is the minimum number of items the set needs to contain. A +value of 1 returns the frequency of single items. Only item sets that contain at +least the number of `minimum_set_size` items are returned. For example, the item +set `orange, banana, apple` is returned only if the minimum set size is 3 or lower. [discrete] -[[frequent-items-minimum-support]] +[[frequent-item-sets-minimum-support]] ==== Minimum support -The minimum support value is the ratio of documents that an item set must exist -in to be considered "frequent". 
In particular, it is a normalized value between -0 and 1. It is calculated by dividing the number of documents containing the +The minimum support value is the ratio of documents that an item set must exist +in to be considered "frequent". In particular, it is a normalized value between +0 and 1. It is calculated by dividing the number of documents containing the item set by the total number of documents. -For example, if a given item set is contained by five documents and the total -number of documents is 20, then the support of the item set is 5/20 = 0.25. -Therefore, this set is returned only if the minimum support is 0.25 or lower. -As a higher minimum support prunes more items, the calculation is less resource -intensive. The `minimum_support` parameter has an effect on the required memory +For example, if a given item set is contained by five documents and the total +number of documents is 20, then the support of the item set is 5/20 = 0.25. +Therefore, this set is returned only if the minimum support is 0.25 or lower. +As a higher minimum support prunes more items, the calculation is less resource +intensive. The `minimum_support` parameter has an effect on the required memory and the runtime of the aggregation. [discrete] -[[frequent-items-size]] +[[frequent-item-sets-size]] ==== Size -This parameter defines the maximum number of item sets to return. The result -contains top-k item sets; the item sets with the highest support values. This -parameter has a significant effect on the required memory and the runtime of the +This parameter defines the maximum number of item sets to return. The result +contains top-k item sets; the item sets with the highest support values. This +parameter has a significant effect on the required memory and the runtime of the aggregation. [discrete] -[[frequent-items-filter]] +[[frequent-item-sets-filter]] ==== Filter A query to filter documents to use as part of the analysis. 
Documents that @@ -123,7 +121,7 @@ Use a top-level query to filter the data set. [discrete] -[[frequent-items-example]] +[[frequent-item-sets-example]] ==== Examples In the following examples, we use the e-commerce {kib} sample data set. @@ -132,14 +130,14 @@ In the following examples, we use the e-commerce {kib} sample data set. [discrete] ==== Aggregation with two analyzed fields and an `exclude` parameter -In the first example, the goal is to find out based on transaction data (1.) -from what product categories the customers purchase products frequently together -and (2.) from which cities they make those purchases. We want to exclude results -where location information is not available (where the city name is `other`). -Finally, we are interested in sets with three or more items, and want to see the +In the first example, the goal is to find out based on transaction data (1.) +from what product categories the customers purchase products frequently together +and (2.) from which cities they make those purchases. We want to exclude results +where location information is not available (where the city name is `other`). +Finally, we are interested in sets with three or more items, and want to see the first three frequent item sets with the highest support. -Note that we use the <> endpoint in this first +Note that we use the <> endpoint in this first example. [source,console] @@ -149,7 +147,7 @@ POST /kibana_sample_data_ecommerce/_async_search "size":0, "aggs":{ "my_agg":{ - "frequent_items":{ + "frequent_item_sets":{ "minimum_set_size":3, "fields":[ { @@ -168,7 +166,7 @@ POST /kibana_sample_data_ecommerce/_async_search ------------------------------------------------- // TEST[skip:setup kibana sample data] -The response of the API call above contains an identifier (`id`) of the async +The response of the API call above contains an identifier (`id`) of the async search request. 
You can use the identifier to retrieve the search results: [source,console] @@ -225,27 +223,27 @@ The API returns a response similar to the following one: "support" : 0.026310160427807486 } ], - (...) + (...) } } ------------------------------------------------- // TEST[skip:setup kibana sample data] <1> The array of returned item sets. -<2> The `key` object contains one item set. In this case, it consists of two +<2> The `key` object contains one item set. In this case, it consists of two values of the `category.keyword` field and one value of the `geoip.city_name`. -<3> The number of documents that contain the item set. -<4> The support value of the item set. It is calculated by dividing the number -of documents containing the item set by the total number of documents. - -The response shows that the categories customers purchase from most frequently -together are `Women's Clothing` and `Women's Shoes` and customers from New York -tend to buy items from these categories frequently together. In other words, -customers who buy products labelled `Women's Clothing` more likely buy products -also from the `Women's Shoes` category and customers from New York most likely -buy products from these categories together. The item set with the second -highest support is `Women's Clothing` and `Women's Accessories` with customers -mostly from New York. Finally, the item set with the third highest support is +<3> The number of documents that contain the item set. +<4> The support value of the item set. It is calculated by dividing the number +of documents containing the item set by the total number of documents. + +The response shows that the categories customers purchase from most frequently +together are `Women's Clothing` and `Women's Shoes` and customers from New York +tend to buy items from these categories frequently together. 
In other words, +customers who buy products labelled `Women's Clothing` more likely buy products +also from the `Women's Shoes` category and customers from New York most likely +buy products from these categories together. The item set with the second +highest support is `Women's Clothing` and `Women's Accessories` with customers +mostly from New York. Finally, the item set with the third highest support is `Men's Clothing` and `Men's Shoes` with customers mostly from Cairo. @@ -262,7 +260,7 @@ POST /kibana_sample_data_ecommerce/_async_search "size": 0, "aggs": { "my_agg": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "fields": [ { "field": "category.keyword" }, @@ -282,20 +280,20 @@ POST /kibana_sample_data_ecommerce/_async_search // TEST[skip:setup kibana sample data] The result will only show item sets that created from documents matching the -filter, namely purchases in Europe. Using `filter`, the calculated `support` -still takes all purchases into acount. That's different than specifying a query -at the top-level, in which case `support` gets calculated only from purchases in +filter, namely purchases in Europe. Using `filter`, the calculated `support` +still takes all purchases into acount. That's different than specifying a query +at the top-level, in which case `support` gets calculated only from purchases in Europe. [discrete] ==== Analyzing numeric values by using a runtime field -The frequent items aggregation enables you to bucket numeric values by using -<>. The next example demonstrates how to use a script to -add a runtime field to your documents called `price_range`, which is -calculated from the taxful total price of the individual transactions. The -runtime field then can be used in the frequent items aggregation as a field to +The frequent items aggregation enables you to bucket numeric values by using +<>. 
The next example demonstrates how to use a script to +add a runtime field to your documents called `price_range`, which is +calculated from the taxful total price of the individual transactions. The +runtime field then can be used in the frequent items aggregation as a field to analyze. @@ -318,7 +316,7 @@ GET kibana_sample_data_ecommerce/_search "size": 0, "aggs": { "my_agg": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 4, "fields": [ { @@ -402,6 +400,6 @@ The API returns a response similar to the following one: ------------------------------------------------- // TEST[skip:setup kibana sample data] -The response shows the categories that customers purchase from most frequently -together, the location of the customers who tend to buy items from these +The response shows the categories that customers purchase from most frequently +together, the location of the customers who tend to buy items from these categories, and the most frequent price ranges of these purchases. 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 80d97885bc4f9..821dab59c35a7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -1573,7 +1573,7 @@ public List getAggregations() { ).addResultReader(InternalCategorizationAggregation::new) .setAggregatorRegistrar(s -> s.registerUsage(CategorizeTextAggregationBuilder.NAME)), new AggregationSpec( - FrequentItemSetsAggregationBuilder.NAME, + new ParseField(FrequentItemSetsAggregationBuilder.NAME, FrequentItemSetsAggregationBuilder.DEPRECATED_NAME), FrequentItemSetsAggregationBuilder::new, checkAggLicense(FrequentItemSetsAggregationBuilder.PARSER, FREQUENT_ITEM_SETS_AGG_FEATURE) ).addResultReader(FrequentItemSetsAggregatorFactory.getResultReader()) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java index 171a8f4d79d30..9a8a47991af45 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java @@ -120,6 +120,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private static final Logger logger = LogManager.getLogger(EclatMapReducer.class); private static final int VERSION = 1; + // named writable for this implementation public static final String NAME = "frequent_items-eclat-" + VERSION; // cache for marking transactions visited, memory usage: ((BITSET_CACHE_TRAVERSAL_DEPTH -2) * BITSET_CACHE_NUMBER_OF_TRANSACTIONS) / 8 diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java index 3f3db67afe4d6..e4263da428bca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java @@ -37,7 +37,10 @@ public final class FrequentItemSetsAggregationBuilder extends AbstractAggregationBuilder { - public static final String NAME = "frequent_items"; + public static final String NAME = "frequent_item_sets"; + + // name used between 8.4 - 8.6, kept for backwards compatibility until 9.0 + public static final String DEPRECATED_NAME = "frequent_items"; public static final double DEFAULT_MINIMUM_SUPPORT = 0.01; public static final int DEFAULT_MINIMUM_SET_SIZE = 1; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilderTests.java index 4890d0893a64f..9534ace3d3b9b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilderTests.java @@ -184,7 +184,7 @@ public void testValidation() { randomFrom(EXECUTION_HINT_ALLOWED_MODES) ).subAggregation(AggregationBuilders.avg("fieldA"))); - assertEquals("Aggregator [fi] of type [frequent_items] cannot accept sub-aggregations", e.getMessage()); + assertEquals("Aggregator [fi] of type [frequent_item_sets] cannot accept sub-aggregations", e.getMessage()); e = expectThrows( IllegalArgumentException.class, 
@@ -202,7 +202,7 @@ public void testValidation() { ).subAggregations(new AggregatorFactories.Builder().addAggregator(AggregationBuilders.avg("fieldA"))) ); - assertEquals("Aggregator [fi] of type [frequent_items] cannot accept sub-aggregations", e.getMessage()); + assertEquals("Aggregator [fi] of type [frequent_item_sets] cannot accept sub-aggregations", e.getMessage()); e = expectThrows( IllegalArgumentException.class, diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_items_agg.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml similarity index 88% rename from x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_items_agg.yml rename to x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml index 39661d4917d01..f5244d271abed 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_items_agg.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml @@ -93,7 +93,7 @@ setup: --- -"Test frequent items array fields": +"Test frequent item sets array fields": - do: search: @@ -103,7 +103,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -123,7 +123,7 @@ setup: - match: { aggregations.fi.buckets.1.key.error_message: ["engine overheated"] } --- -"Test frequent items date format": +"Test frequent item sets date format": - do: search: @@ -141,7 +141,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -159,7 +159,7 @@ setup: --- -"Test frequent items date format 2": +"Test frequent item sets date format 2": - do: search: @@ -177,7 +177,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 2, "minimum_support": 0.3, "fields": [ 
@@ -195,7 +195,7 @@ setup: - match: { aggregations.fi.buckets.0.key.error_message: ["engine overheated"] } --- -"Test frequent items array fields profile": +"Test frequent item sets array fields profile": - do: search: @@ -206,7 +206,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.2, "fields": [ @@ -229,7 +229,7 @@ setup: - match: { aggregations.fi.profile.unique_items_after_prune: 11 } --- -"Test frequent items flattened fields": +"Test frequent item sets flattened fields": - do: search: @@ -239,7 +239,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -259,7 +259,7 @@ setup: - match: { aggregations.fi.buckets.1.key.data\.error_message: ["engine overheated"] } --- -"Test frequent items as subagg": +"Test frequent item sets as subagg": - do: search: @@ -276,7 +276,7 @@ setup: }, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -298,7 +298,7 @@ setup: - match: { aggregations.filter_error.fi.buckets.0.key.error_message: ["compressor low pressure"] } --- -"Test frequent items as multi-bucket subagg": +"Test frequent item sets as multi-bucket subagg": - do: search: @@ -314,7 +314,7 @@ setup: }, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -335,7 +335,7 @@ setup: - match: { aggregations.weekly.buckets.2.fi.buckets.0.doc_count: 1 } --- -"Test frequent items filter": +"Test frequent item sets filter": - do: search: @@ -345,7 +345,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -369,7 +369,7 @@ setup: - match: { aggregations.fi.buckets.0.key.error_message: ["compressor low pressure"] } --- -"Test frequent items exclude": +"Test 
frequent item sets exclude": - do: search: @@ -379,7 +379,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -401,7 +401,7 @@ setup: - match: { aggregations.fi.buckets.1.support: 0.3 } --- -"Test frequent items include": +"Test frequent item sets include": - do: search: @@ -411,7 +411,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -431,9 +431,9 @@ setup: - match: { aggregations.fi.buckets.0.key.error_message: ["engine overheated"] } --- -"Test frequent items unsupported types": +"Test frequent item sets unsupported types": - do: - catch: /Field \[geo_point\] of type \[geo_point\] is not supported for aggregation \[frequent_items\]/ + catch: /Field \[geo_point\] of type \[geo_point\] is not supported for aggregation \[frequent_item_sets\]/ search: index: store body: > @@ -441,7 +441,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -454,7 +454,7 @@ setup: } } - do: - catch: /Field \[histogram\] of type \[histogram\] is not supported for aggregation \[frequent_items\]/ + catch: /Field \[histogram\] of type \[histogram\] is not supported for aggregation \[frequent_item_sets\]/ search: index: store body: > @@ -462,7 +462,7 @@ setup: "size": 0, "aggs": { "fi": { - "frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -476,9 +476,9 @@ setup: } --- -"Test frequent items unsupported subaggs": +"Test frequent item sets unsupported subaggs": - do: - catch: /Aggregator \[fi\] of type \[frequent_items\] cannot accept sub-aggregations/ + catch: /Aggregator \[fi\] of type \[frequent_item_sets\] cannot accept sub-aggregations/ search: index: store body: > @@ -486,7 +486,7 @@ setup: "size": 0, "aggs": { "fi": { - 
"frequent_items": { + "frequent_item_sets": { "minimum_set_size": 3, "minimum_support": 0.3, "fields": [ @@ -504,3 +504,39 @@ setup: } } } + +--- +"Test deprecated frequent items": + - skip: + features: + - "allowed_warnings" + + - do: + allowed_warnings: + - 'Deprecated field [frequent_items] used, expected [frequent_item_sets] instead' + + search: + index: store + body: > + { + "size": 0, + "aggs": { + "fi": { + "frequent_items": { + "minimum_set_size": 3, + "minimum_support": 0.3, + "fields": [ + {"field": "features"}, + {"field": "error_message"} + ] + } + } + } + } + - length: { aggregations.fi.buckets: 4 } + - match: { aggregations.fi.buckets.0.doc_count: 5 } + - match: { aggregations.fi.buckets.0.support: 0.5 } + - match: { aggregations.fi.buckets.0.key.error_message: ["compressor low pressure"] } + - match: { aggregations.fi.buckets.1.doc_count: 4 } + - match: { aggregations.fi.buckets.1.support: 0.4 } + - match: { aggregations.fi.buckets.1.key.error_message: ["engine overheated"] } From ed2f0cfba12b695d4901352ba8cace2031a523cf Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 10:28:58 +0000 Subject: [PATCH 46/63] Fix PrioritizedThrottledTaskRunnerTests (#93446) These tests try and execute `maxThreads` concurrent tasks to ensure that the rest of the executor's queue has been processed, but due to #93443 (and the executor's zero timeout) this sometimes doesn't work. This commit fixes the problem by making every thread a core thread so that they do not time out. 
Closes #92910 Closes #92747 --- .../util/concurrent/PrioritizedThrottledTaskRunnerTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java index f1955ebcddacb..01ecce705856f 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java @@ -39,7 +39,7 @@ public class PrioritizedThrottledTaskRunnerTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); maxThreads = between(1, 10); - executor = EsExecutors.newScaling("test", 1, maxThreads, 0, TimeUnit.MILLISECONDS, false, threadFactory, threadContext); + executor = EsExecutors.newScaling("test", maxThreads, maxThreads, 0, TimeUnit.NANOSECONDS, false, threadFactory, threadContext); } @Override @@ -197,7 +197,7 @@ public void testEnqueueSpawnsNewTasksUpToMax() throws Exception { public void testFailsTasksOnRejectionOrShutdown() throws Exception { final var executor = randomBoolean() - ? EsExecutors.newScaling("test", 1, maxThreads, 0, TimeUnit.MILLISECONDS, true, threadFactory, threadContext) + ? 
EsExecutors.newScaling("test", maxThreads, maxThreads, 0, TimeUnit.MILLISECONDS, true, threadFactory, threadContext) : EsExecutors.newFixed("test", maxThreads, between(1, 5), threadFactory, threadContext, false); final var taskRunner = new PrioritizedThrottledTaskRunner("test", between(1, maxThreads * 2), executor); final var totalPermits = between(1, maxThreads * 2); From c363b6406413dd4878a081b7dd393ab87a22ee62 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Thu, 2 Feb 2023 11:29:41 +0100 Subject: [PATCH 47/63] Revert "Convert full cluster restart tests to new rest testing framework (#93422)" (#93444) Reverts #93422 Should address: #93442 and possibly #93440 --- .../main/groovy/elasticsearch.bwc-test.gradle | 14 --- .../internal/ElasticsearchTestBasePlugin.java | 2 +- .../InternalTestArtifactExtension.java | 2 +- .../test/rest/RestTestBasePlugin.java | 74 ++---------- qa/full-cluster-restart/build.gradle | 62 +++++++++-- .../FullClusterRestartTestOrdering.java | 24 ---- .../FullClusterRestartUpgradeStatus.java | 14 --- ...rameterizedFullClusterRestartTestCase.java | 100 ----------------- .../upgrades/FullClusterRestartIT.java | 49 +------- .../upgrades/QueryBuilderBWCIT.java | 29 +---- .../test/cluster/ClusterHandle.java | 17 --- .../local/AbstractLocalSpecBuilder.java | 23 ---- .../local/DefaultLocalClusterSpecBuilder.java | 34 +----- .../cluster/local/LocalClusterFactory.java | 72 ++---------- .../cluster/local/LocalClusterHandle.java | 59 +++++----- .../test/cluster/local/LocalClusterSpec.java | 14 +-- .../local/LocalClusterSpecBuilder.java | 11 -- .../local/LocalElasticsearchCluster.java | 24 +--- .../test/cluster/local/LocalSpecBuilder.java | 11 -- .../cluster/local/WaitForHttpResource.java | 2 +- .../LocalDistributionResolver.java | 3 - .../ReleasedDistributionResolver.java | 54 --------- .../SnapshotDistributionResolver.java | 30 +---- .../qa/full-cluster-restart/build.gradle | 99 +++++++++++++++-- .../xpack/restart/FullClusterRestartIT.java | 54 
+-------- .../resources/system_key | 0 x-pack/qa/full-cluster-restart/build.gradle | 105 ++++++++++++++++-- ...stractXpackFullClusterRestartTestCase.java | 49 -------- .../restart/CoreFullClusterRestartIT.java | 46 -------- .../xpack/restart/QueryBuilderBWCIT.java | 42 ------- .../restart/CoreFullClusterRestartIT.java | 24 ++++ .../xpack/restart/FullClusterRestartIT.java | 51 ++++----- ...MLModelDeploymentFullClusterRestartIT.java | 10 +- ...nfigIndexMappingsFullClusterRestartIT.java | 10 +- .../MlHiddenIndicesFullClusterRestartIT.java | 10 +- .../MlMigrationFullClusterRestartIT.java | 10 +- .../xpack/restart/QueryBuilderBWCIT.java | 22 ++++ .../xpack/restart/WatcherMappingUpdateIT.java | 12 +- .../xpack/restart/funny-timeout-watch.json | 0 .../xpack/restart/logging-watch.json | 0 .../xpack/restart/simple-watch.json | 0 .../xpack/restart/throttle-period-watch.json | 0 .../resources/system_key | 0 43 files changed, 401 insertions(+), 867 deletions(-) delete mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java delete mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java delete mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java rename qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/upgrades/FullClusterRestartIT.java (97%) rename qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java (91%) delete mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (65%) rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{javaRestTest => test}/resources/system_key (100%) 
delete mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java delete mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java create mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java (97%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java (94%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java (96%) create mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java (89%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/resources/org/elasticsearch/xpack/restart/logging-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/resources/org/elasticsearch/xpack/restart/simple-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => 
test}/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{javaRestTest => test}/resources/system_key (100%) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index a5e74c3721297..b80c450c5914e 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,8 +9,6 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin -import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask ext.bwcTaskName = { Version version -> return "v${version}#bwcTest" @@ -38,17 +36,5 @@ plugins.withType(ElasticsearchTestBasePlugin) { } } -plugins.withType(InternalJavaRestTestPlugin) { - tasks.named("javaRestTest") { - enabled = false - } - - tasks.withType(StandaloneRestIntegTestTask).configureEach { - testClassesDirs = sourceSets.javaRestTest.output.classesDirs - classpath = sourceSets.javaRestTest.runtimeClasspath - usesDefaultDistribution() - } -} - tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index c6758092b17ec..854dc6d204382 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -89,7 +89,7 @@ public void execute(Task t) { 
test.getJvmArgumentProviders().add(nonInputProperties); test.getExtensions().add("nonInputProperties", nonInputProperties); - test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); + test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName())); test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); test.exclude("**/*$*.class"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java index 4952085f466be..fae845b229651 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java @@ -32,7 +32,7 @@ public void registerTestArtifactFromSourceSet(SourceSet sourceSet) { JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class); javaPluginExtension.registerFeature(name + "Artifacts", featureSpec -> { featureSpec.usingSourceSet(sourceSet); - featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-test-artifacts", "1.0"); + featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-" + name + "-artifacts", "1.0"); // This feature is only used internally in the // elasticsearch build so we do not need any publication. 
featureSpec.disablePublication(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 1a7b5bc3ee2a1..9baa17bc00d7c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -13,8 +13,6 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; -import org.elasticsearch.gradle.ElasticsearchDistributionType; -import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; @@ -60,8 +58,6 @@ public class RestTestBasePlugin implements Plugin { private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java"; private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution"; private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution"; - private static final String BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; - private static final String BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; private static final String TESTS_CLUSTER_MODULES_PATH_SYSPROP = "tests.cluster.modules.path"; private static final String TESTS_CLUSTER_PLUGINS_PATH_SYSPROP = "tests.cluster.plugins.path"; private static final String DEFAULT_REST_INTEG_TEST_DISTRO = "default_distro"; @@ -83,17 +79,16 @@ public void apply(Project project) { project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); // Register integ-test and default distributions - 
ElasticsearchDistribution defaultDistro = createDistribution( - project, - DEFAULT_REST_INTEG_TEST_DISTRO, - VersionProperties.getElasticsearch() - ); - ElasticsearchDistribution integTestDistro = createDistribution( - project, - INTEG_TEST_REST_INTEG_TEST_DISTRO, - VersionProperties.getElasticsearch(), - ElasticsearchDistributionTypes.INTEG_TEST_ZIP - ); + NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); + ElasticsearchDistribution defaultDistro = distributions.create(DEFAULT_REST_INTEG_TEST_DISTRO, distro -> { + distro.setVersion(VersionProperties.getElasticsearch()); + distro.setArchitecture(Architecture.current()); + }); + ElasticsearchDistribution integTestDistro = distributions.create(INTEG_TEST_REST_INTEG_TEST_DISTRO, distro -> { + distro.setVersion(VersionProperties.getElasticsearch()); + distro.setArchitecture(Architecture.current()); + distro.setType(ElasticsearchDistributionTypes.INTEG_TEST_ZIP); + }); // Create configures for module and plugin dependencies Configuration modulesConfiguration = createPluginConfiguration(project, MODULES_CONFIGURATION, true, false); @@ -156,35 +151,6 @@ public Void call(Object... args) { return null; } }); - - // Add `usesBwcDistribution(version)` extension method to test tasks to indicate they require a BWC distribution - task.getExtensions().getExtraProperties().set("usesBwcDistribution", new Closure(task) { - @Override - public Void call(Object... 
args) { - if (args.length != 1 && args[0] instanceof Version == false) { - throw new IllegalArgumentException("Expected exactly one argument of type org.elasticsearch.gradle.Version"); - } - - Version version = (Version) args[0]; - boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; - String versionString = version.toString(); - ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); - - task.dependsOn(bwcDistro); - registerDistributionInputs(task, bwcDistro); - - nonInputSystemProperties.systemProperty( - (isReleased ? BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX : BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX) + versionString, - providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) - ); - - if (version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { - // If we are upgrade testing older versions we also need to upgrade to 7.last - this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); - } - return null; - } - }); }); project.getTasks() @@ -192,26 +158,6 @@ public Void call(Object... 
args) { .configure(check -> check.dependsOn(project.getTasks().withType(StandaloneRestIntegTestTask.class))); } - private ElasticsearchDistribution createDistribution(Project project, String name, String version) { - return createDistribution(project, name, version, null); - } - - private ElasticsearchDistribution createDistribution(Project project, String name, String version, ElasticsearchDistributionType type) { - NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); - ElasticsearchDistribution maybeDistro = distributions.findByName(name); - if (maybeDistro == null) { - return distributions.create(name, distro -> { - distro.setVersion(version); - distro.setArchitecture(Architecture.current()); - if (type != null) { - distro.setType(type); - } - }); - } else { - return maybeDistro; - } - } - private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Action patternFilter) { return distribution.getExtracted().getAsFileTree().matching(patternFilter); } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index b6f181809e0e4..a3af45b43363e 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -6,20 +6,64 @@ * Side Public License, v 1. 
*/ + +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-java-rest-test' -apply plugin: 'elasticsearch.internal-test-artifact-base' +apply plugin: 'elasticsearch.internal-testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' -testArtifacts { - registerTestArtifactFromSourceSet(sourceSets.javaRestTest) -} - BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { - usesBwcDistribution(bwcVersion) - systemProperty("tests.old_cluster_version", bwcVersion) + def baseCluster = testClusters.register(baseName) { + if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When testing older versions we have to first upgrade to 7.last + versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] + } else { + versions = [bwcVersion.toString(), project.version] + } + numberOfNodes = 2 + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '60m' + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'xpack.security.enabled', 'false' + requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + } + + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { + useCluster baseCluster + mustRunAfter("precommit") + doFirst { + delete("${buildDir}/cluster/shared/repo/${baseName}") + } + + systemProperty 'tests.is_old_cluster', 'true' + } + + tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { + useCluster baseCluster + dependsOn "${baseName}#oldClusterTest" + doFirst { + baseCluster.get().goToNextVersion() + if 
(bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. + baseCluster.get().goToNextVersion() + } + } + systemProperty 'tests.is_old_cluster', 'false' + } + + String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") + tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { + it.systemProperty 'tests.old_cluster_version', oldVersion + it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + it.nonInputProperties.systemProperty('tests.clustername', baseName) + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn tasks.named("${baseName}#upgradedClusterTest") } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java deleted file mode 100644 index 232619ee93bb9..0000000000000 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.upgrades; - -import com.carrotsearch.randomizedtesting.TestMethodAndParams; - -import java.util.Comparator; - -public class FullClusterRestartTestOrdering implements Comparator { - @Override - public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { - return Integer.compare(getOrdinal(o1), getOrdinal(o2)); - } - - private int getOrdinal(TestMethodAndParams t) { - return ((FullClusterRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); - } -} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java deleted file mode 100644 index 06048d020e2a0..0000000000000 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.upgrades; - -public enum FullClusterRestartUpgradeStatus { - OLD, - UPGRADED -} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java deleted file mode 100644 index a064c87743800..0000000000000 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.upgrades; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; - -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.util.Version; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.AfterClass; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Locale; - -import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; -import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; - -@TestCaseOrdering(FullClusterRestartTestOrdering.class) -public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); - private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); - private static boolean upgradeFailed = false; - private static boolean upgraded = false; - private final FullClusterRestartUpgradeStatus requestedUpgradeStatus; - - public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - this.requestedUpgradeStatus = upgradeStatus; - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return Arrays.stream(FullClusterRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); - } - - @Before - public void maybeUpgrade() throws Exception { - if (upgraded == false && requestedUpgradeStatus == UPGRADED) { - try { - if 
(OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { - // First upgrade to latest wire compatible version - getUpgradeCluster().upgradeToVersion(MINIMUM_WIRE_COMPATIBLE_VERSION); - } - getUpgradeCluster().upgradeToVersion(Version.CURRENT); - closeClients(); - initClient(); - } catch (Exception e) { - upgradeFailed = true; - throw e; - } finally { - upgraded = true; - } - } - - // Skip remaining tests if upgrade failed - assumeFalse("Cluster upgrade failed", upgradeFailed); - } - - @AfterClass - public static void resetUpgrade() { - upgraded = false; - upgradeFailed = false; - } - - public boolean isRunningAgainstOldCluster() { - return requestedUpgradeStatus == OLD; - } - - public static org.elasticsearch.Version getOldClusterVersion() { - return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString()); - } - - public static Version getOldClusterTestVersion() { - return Version.fromString(OLD_CLUSTER_VERSION.toString()); - } - - protected abstract ElasticsearchCluster getUpgradeCluster(); - - @Override - protected String getTestRestCluster() { - return getUpgradeCluster().getHttpAddresses(); - } - - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } - - protected String getRootTestName() { - return getTestName().split(" ")[0].toLowerCase(Locale.ROOT); - } -} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java similarity index 97% rename from qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java rename to qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 3f9a007e6bf4e..af66fbc61562b 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.upgrades; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; @@ -30,10 +28,6 @@ import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.FeatureFlag; -import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.transport.Compression; @@ -41,10 +35,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.TestRule; import java.io.IOException; import java.util.ArrayList; @@ -54,6 +44,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -89,41 +80,13 @@ * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. 
*/ -public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { - - private static TemporaryFolder repoDirectory = new TemporaryFolder(); - - protected static LocalClusterConfigProvider clusterConfig = c -> {}; - - private static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .version(getOldClusterTestVersion()) - .nodes(2) - .setting("path.repo", () -> repoDirectory.getRoot().getPath()) - .setting("xpack.security.enabled", "false") - // some tests rely on the translog not being flushed - .setting("indices.memory.shard_inactive_time", "60m") - .apply(() -> clusterConfig) - .feature(FeatureFlag.TIME_SERIES_MODE) - .build(); - - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { private String index; - public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Override - protected ElasticsearchCluster getUpgradeCluster() { - return cluster; - } - @Before public void setIndex() { - index = getRootTestName(); + index = getTestName().toLowerCase(Locale.ROOT); } public void testSearch() throws Exception { @@ -1088,7 +1051,7 @@ public void testSnapshotRestore() throws IOException { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); + repoConfig.field("location", System.getProperty("tests.path.repo")); } repoConfig.endObject(); } @@ -1762,7 +1725,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); + repoConfig.field("location", System.getProperty("tests.path.repo")); } repoConfig.endObject(); } @@ -1822,7 +1785,7 @@ public void 
testForbidDisableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); + repoConfig.field("location", System.getProperty("tests.path.repo")); } repoConfig.endObject(); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java similarity index 91% rename from qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java rename to qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 1636644409fc7..d69f0b05958f9 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -8,8 +8,6 @@ package org.elasticsearch.upgrades; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -34,11 +32,7 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xcontent.XContentBuilder; -import org.junit.ClassRule; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -60,28 +54,9 @@ * The queries to test are specified in json format, which turns out to work because we tend break here rarely. 
If the * json format of a query being tested here then feel free to change this. */ -public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { - private static final List CANDIDATES = new ArrayList<>(); - - protected static LocalClusterConfigProvider clusterConfig = c -> {}; - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .version(getOldClusterTestVersion()) - .nodes(2) - .setting("xpack.security.enabled", "false") - .apply(() -> clusterConfig) - .build(); +public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { - @Override - protected ElasticsearchCluster getUpgradeCluster() { - return cluster; - } - - public QueryBuilderBWCIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } + private static final List CANDIDATES = new ArrayList<>(); static { addCandidate(""" diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java index 2a4e3e3958c57..658925744860d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java @@ -8,8 +8,6 @@ package org.elasticsearch.test.cluster; -import org.elasticsearch.test.cluster.util.Version; - import java.io.Closeable; /** @@ -75,19 +73,4 @@ public interface ClusterHandle extends Closeable { * @return cluster node TCP transport endpoints */ String getTransportEndpoint(int index); - - /** - * Upgrades a single node to the given version. Method blocks until the node is back up and ready to respond to requests. 
- * - * @param index index of node ot upgrade - * @param version version to upgrade to - */ - void upgradeNodeToVersion(int index, Version version); - - /** - * Performs a "full cluster restart" upgrade to the given version. Method blocks until the cluster is restarted and available. - * - * @param version version to upgrade to - */ - void upgradeToVersion(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index aa71ffdf27a72..2e3366cdf9af3 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.ArrayList; @@ -33,11 +32,9 @@ public abstract class AbstractLocalSpecBuilder> im private final Set plugins = new HashSet<>(); private final Set features = new HashSet<>(); private final Map keystoreSettings = new HashMap<>(); - private final Map keystoreFiles = new HashMap<>(); private final Map extraConfigFiles = new HashMap<>(); private final Map systemProperties = new HashMap<>(); private DistributionType distributionType; - private Version version; private String keystorePassword; protected AbstractLocalSpecBuilder(AbstractLocalSpecBuilder parent) { @@ -141,16 +138,6 @@ public Map getKeystoreSettings() { return inherit(() -> parent.getKeystoreSettings(), keystoreSettings); } - @Override - public T keystore(String key, Resource file) { - this.keystoreFiles.put(key, file); - return cast(this); - } - - 
public Map getKeystoreFiles() { - return inherit(() -> parent.getKeystoreFiles(), keystoreFiles); - } - @Override public T configFile(String fileName, Resource configFile) { this.extraConfigFiles.put(fileName, configFile); @@ -181,16 +168,6 @@ public String getKeystorePassword() { return inherit(() -> parent.getKeystorePassword(), keystorePassword); } - @Override - public T version(Version version) { - this.version = version; - return cast(this); - } - - public Version getVersion() { - return inherit(() -> parent.getVersion(), version); - } - private List inherit(Supplier> parent, List child) { List combinedList = new ArrayList<>(); if (this.parent != null) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index fad762fa34c23..9c4aa48eb03d4 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -19,14 +19,12 @@ import java.util.List; import java.util.Optional; import java.util.function.Consumer; -import java.util.function.Supplier; public class DefaultLocalClusterSpecBuilder extends AbstractLocalSpecBuilder implements LocalClusterSpecBuilder { private String name = "test-cluster"; private final List nodeBuilders = new ArrayList<>(); private final List users = new ArrayList<>(); private final List roleFiles = new ArrayList<>(); - private final List> lazyConfigProviders = new ArrayList<>(); public DefaultLocalClusterSpecBuilder() { super(null); @@ -48,12 +46,6 @@ public DefaultLocalClusterSpecBuilder apply(LocalClusterConfigProvider configPro return this; } - @Override - public LocalClusterSpecBuilder apply(Supplier configProvider) { - lazyConfigProviders.add(configProvider); - return this; - } - @Override public 
DefaultLocalClusterSpecBuilder nodes(int nodes) { if (nodes < nodeBuilders.size()) { @@ -125,28 +117,7 @@ public ElasticsearchCluster build() { clusterSpec.setNodes(nodeSpecs); clusterSpec.validate(); - return new LocalElasticsearchCluster(this); - } - - LocalClusterSpec buildClusterSpec() { - // Apply lazily provided configuration - lazyConfigProviders.forEach(s -> s.get().apply(this)); - - List clusterUsers = users.isEmpty() ? List.of(User.DEFAULT_USER) : users; - LocalClusterSpec clusterSpec = new LocalClusterSpec(name, clusterUsers, roleFiles); - List nodeSpecs; - - if (nodeBuilders.isEmpty()) { - // No node-specific configuration so assume a single-node cluster - nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec)); - } else { - nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec)).toList(); - } - - clusterSpec.setNodes(nodeSpecs); - clusterSpec.validate(); - - return clusterSpec; + return new LocalElasticsearchCluster(clusterSpec); } public static class DefaultLocalNodeSpecBuilder extends AbstractLocalSpecBuilder implements LocalNodeSpecBuilder { @@ -167,7 +138,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Optional.ofNullable(getVersion()).orElse(Version.CURRENT), + Version.CURRENT, getSettingsProviders(), getSettings(), getEnvironmentProviders(), @@ -177,7 +148,6 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { Optional.ofNullable(getDistributionType()).orElse(DistributionType.INTEG_TEST), getFeatures(), getKeystoreSettings(), - getKeystoreFiles(), getKeystorePassword(), getExtraConfigFiles(), getSystemProperties() diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 5f43bb8aa71b6..08318b5145038 100644 --- 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -79,20 +79,21 @@ public LocalClusterHandle create(LocalClusterSpec spec) { public class Node { private final LocalNodeSpec spec; private final Path workingDir; + private final Path distributionDir; private final Path repoDir; private final Path dataDir; private final Path logsDir; private final Path configDir; private final Path tempDir; - private Path distributionDir; - private Version currentVersion; + private boolean initialized = false; private Process process = null; private DistributionDescriptor distributionDescriptor; public Node(LocalNodeSpec spec) { this.spec = spec; this.workingDir = baseWorkingDir.resolve(spec.getCluster().getName()).resolve(spec.getName()); + this.distributionDir = workingDir.resolve("distro"); // location of es distribution files, typically hard-linked this.repoDir = baseWorkingDir.resolve("repo"); this.dataDir = workingDir.resolve("data"); this.logsDir = workingDir.resolve("logs"); @@ -100,30 +101,22 @@ public Node(LocalNodeSpec spec) { this.tempDir = workingDir.resolve("tmp"); // elasticsearch temporary directory } - public synchronized void start(Version version) { + public synchronized void start() { LOGGER.info("Starting Elasticsearch node '{}'", spec.getName()); - if (version != null) { - spec.setVersion(version); - } - if (currentVersion == null || currentVersion.equals(spec.getVersion()) == false) { + if (initialized == false) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); - distributionDir = OS.conditional( - // Use per-version distribution directories on Windows to avoid cleanup failures - c -> c.onWindows(() -> 
workingDir.resolve("distro").resolve(distributionDescriptor.getVersion().toString())) - .onUnix(() -> workingDir.resolve("distro")) - ); - initializeWorkingDirectory(currentVersion != null); + initializeWorkingDirectory(); createConfigDirectory(); copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install copyExtraJarFiles(); installPlugins(); - if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { + if (spec.getDistributionType() == DistributionType.INTEG_TEST) { installModules(); } - currentVersion = spec.getVersion(); + initialized = true; } else { createConfigDirectory(); copyExtraConfigFiles(); @@ -132,7 +125,6 @@ public synchronized void start(Version version) { writeConfiguration(); createKeystore(); addKeystoreSettings(); - addKeystoreFiles(); configureSecurity(); startElasticsearch(); @@ -143,7 +135,6 @@ public synchronized void stop(boolean forcibly) { ProcessUtils.stopHandle(process.toHandle(), forcibly); ProcessReaper.instance().unregister(getServiceName()); } - deletePortsFiles(); } public void waitForExit() { @@ -168,20 +159,6 @@ public String getTransportEndpoint() { return readPortsFile(portsFile).get(0); } - public void deletePortsFiles() { - try { - Path hostsFile = workingDir.resolve("config").resolve("unicast_hosts.txt"); - Path httpPortsFile = workingDir.resolve("logs").resolve("http.ports"); - Path transportPortsFile = workingDir.resolve("logs").resolve("transport.ports"); - - Files.deleteIfExists(hostsFile); - Files.deleteIfExists(httpPortsFile); - Files.deleteIfExists(transportPortsFile); - } catch (IOException e) { - throw new UncheckedIOException("Failed to write unicast_hosts for: " + this, e); - } - } - public LocalNodeSpec getSpec() { return spec; } @@ -228,13 +205,9 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory(boolean preserveWorkingDirectory) { + private void initializeWorkingDirectory() { try { - if (preserveWorkingDirectory) { - 
IOUtils.deleteWithRetry(distributionDir); - } else { - IOUtils.deleteWithRetry(workingDir); - } + IOUtils.deleteWithRetry(workingDir); try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); } catch (IOUtils.LinkCreationException e) { @@ -377,31 +350,6 @@ private void addKeystoreSettings() { }); } - private void addKeystoreFiles() { - spec.getKeystoreFiles().forEach((key, file) -> { - try { - Path path = Files.createTempFile(tempDir, key, null); - file.writeTo(path); - - ProcessUtils.exec( - spec.getKeystorePassword(), - workingDir, - OS.conditional( - c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) - .onUnix(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore")) - ), - getEnvironmentVariables(), - false, - "add-file", - key, - path.toString() - ).waitFor(); - } catch (InterruptedException | IOException e) { - throw new RuntimeException(e); - } - }); - } - private void configureSecurity() { if (spec.isSecurityEnabled()) { if (spec.getUsers().isEmpty() == false) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 6ad2709957299..878b017e3cd62 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -14,7 +14,7 @@ import org.elasticsearch.test.cluster.local.LocalClusterFactory.Node; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; -import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.cluster.util.Retry; import java.io.IOException; import java.io.UncheckedIOException; @@ -28,6 +28,7 @@ import java.util.concurrent.ForkJoinPool; import 
java.util.concurrent.ForkJoinWorkerThread; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -65,7 +66,7 @@ public LocalClusterHandle(String name, List nodes) { public void start() { if (started.getAndSet(true) == false) { LOGGER.info("Starting Elasticsearch test cluster '{}'", name); - execute(() -> nodes.parallelStream().forEach(n -> n.start(null))); + execute(() -> nodes.parallelStream().forEach(Node::start)); } waitUntilReady(); } @@ -74,10 +75,11 @@ public void start() { public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); - execute(() -> nodes.parallelStream().forEach(n -> n.stop(forcibly))); + execute(() -> nodes.forEach(n -> n.stop(forcibly))); + deletePortFiles(); } else { // Make sure the process is stopped, otherwise wait - execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); + execute(() -> nodes.forEach(n -> n.waitForExit())); } } @@ -126,31 +128,16 @@ public String getTransportEndpoint(int index) { return getTransportEndpoints().split(",")[index]; } - @Override - public void upgradeNodeToVersion(int index, Version version) { - Node node = nodes.get(index); - node.stop(false); - LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); - node.start(version); - waitUntilReady(); - } - - @Override - public void upgradeToVersion(Version version) { - stop(false); - if (started.getAndSet(true) == false) { - LOGGER.info("Upgrading Elasticsearch test cluster '{}' to version {}", name, version); - execute(() -> nodes.parallelStream().forEach(n -> n.start(version))); - } - waitUntilReady(); - } - private void waitUntilReady() { writeUnicastHostsFile(); try { - WaitForHttpResource wait = configureWaitForReady(); - 
wait.waitFor(CLUSTER_UP_TIMEOUT.toMillis()); - } catch (Exception e) { + Retry.retryUntilTrue(CLUSTER_UP_TIMEOUT, Duration.ZERO, () -> { + WaitForHttpResource wait = configureWaitForReady(); + return wait.wait(500); + }); + } catch (TimeoutException e) { + throw new RuntimeException("Timed out after " + CLUSTER_UP_TIMEOUT + " waiting for cluster '" + name + "' status to be yellow"); + } catch (ExecutionException e) { throw new RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } } @@ -204,7 +191,7 @@ private boolean isSecurityAutoConfigured(Node node) { private void writeUnicastHostsFile() { String transportUris = execute(() -> nodes.parallelStream().map(Node::getTransportEndpoint).collect(Collectors.joining("\n"))); - execute(() -> nodes.parallelStream().forEach(node -> { + nodes.forEach(node -> { try { Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); if (Files.notExists(hostsFile)) { @@ -213,7 +200,23 @@ private void writeUnicastHostsFile() { } catch (IOException e) { throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); } - })); + }); + } + + private void deletePortFiles() { + nodes.forEach(node -> { + try { + Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); + Path httpPortsFile = node.getWorkingDir().resolve("logs").resolve("http.ports"); + Path tranportPortsFile = node.getWorkingDir().resolve("logs").resolve("transport.ports"); + + Files.deleteIfExists(hostsFile); + Files.deleteIfExists(httpPortsFile); + Files.deleteIfExists(tranportPortsFile); + } catch (IOException e) { + throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); + } + }); } private T execute(Callable task) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 
2234b037381a8..2836411bbb067 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -69,6 +69,7 @@ void validate() { public static class LocalNodeSpec { private final LocalClusterSpec cluster; private final String name; + private final Version version; private final List settingsProviders; private final Map settings; private final List environmentProviders; @@ -78,11 +79,9 @@ public static class LocalNodeSpec { private final DistributionType distributionType; private final Set features; private final Map keystoreSettings; - private final Map keystoreFiles; private final String keystorePassword; private final Map extraConfigFiles; private final Map systemProperties; - private Version version; public LocalNodeSpec( LocalClusterSpec cluster, @@ -97,7 +96,6 @@ public LocalNodeSpec( DistributionType distributionType, Set features, Map keystoreSettings, - Map keystoreFiles, String keystorePassword, Map extraConfigFiles, Map systemProperties @@ -114,16 +112,11 @@ public LocalNodeSpec( this.distributionType = distributionType; this.features = features; this.keystoreSettings = keystoreSettings; - this.keystoreFiles = keystoreFiles; this.keystorePassword = keystorePassword; this.extraConfigFiles = extraConfigFiles; this.systemProperties = systemProperties; } - void setVersion(Version version) { - this.version = version; - } - public LocalClusterSpec getCluster() { return cluster; } @@ -164,10 +157,6 @@ public Map getKeystoreSettings() { return keystoreSettings; } - public Map getKeystoreFiles() { - return keystoreFiles; - } - public String getKeystorePassword() { return keystorePassword; } @@ -265,7 +254,6 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider) { n.distributionType, n.features, n.keystoreSettings, - n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, n.systemProperties diff --git 
a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java index 1f4086fd47fe8..c07a491d2ace6 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Consumer; -import java.util.function.Supplier; public interface LocalClusterSpecBuilder extends LocalSpecBuilder { /** @@ -20,18 +19,8 @@ public interface LocalClusterSpecBuilder extends LocalSpecBuilder configProvider); - /** * Sets the number of nodes for the cluster. */ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index 9a5e5666f5e9a..54d541cd07144 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -10,21 +10,18 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; -import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; -import org.elasticsearch.test.cluster.util.Version; import org.junit.runner.Description; import org.junit.runners.model.Statement; import java.nio.file.Path; public class LocalElasticsearchCluster implements ElasticsearchCluster { - private final DefaultLocalClusterSpecBuilder builder; - private LocalClusterSpec spec; + private final LocalClusterSpec 
spec; private LocalClusterHandle handle; - public LocalElasticsearchCluster(DefaultLocalClusterSpecBuilder builder) { - this.builder = builder; + public LocalElasticsearchCluster(LocalClusterSpec spec) { + this.spec = spec; } @Override @@ -33,10 +30,9 @@ public Statement apply(Statement base, Description description) { @Override public void evaluate() throws Throwable { try { - spec = builder.buildClusterSpec(); handle = new LocalClusterFactory( Path.of(System.getProperty("java.io.tmpdir")).resolve(description.getDisplayName()).toAbsolutePath(), - new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) + new LocalDistributionResolver(new SnapshotDistributionResolver()) ).create(spec); handle.start(); base.evaluate(); @@ -101,18 +97,6 @@ public String getTransportEndpoint(int index) { return handle.getTransportEndpoint(index); } - @Override - public void upgradeNodeToVersion(int index, Version version) { - checkHandle(); - handle.upgradeNodeToVersion(index, version); - } - - @Override - public void upgradeToVersion(Version version) { - checkHandle(); - handle.upgradeToVersion(version); - } - private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. 
Did you forget the @ClassRule annotation?"); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index d01d7d303748f..3b9428bc1a1aa 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Supplier; @@ -69,11 +68,6 @@ interface LocalSpecBuilder> { */ T keystore(String key, String value); - /** - * Adds a secure file to the node keystore. - */ - T keystore(String key, Resource file); - /** * Sets the security setting keystore password. */ @@ -84,11 +78,6 @@ interface LocalSpecBuilder> { */ T configFile(String fileName, Resource configFile); - /** - * Sets the version of Elasticsearch. Defaults to {@link Version#CURRENT}. - */ - T version(Version version); - /** * Adds a system property to node JVM arguments. 
*/ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java index f00e6f13cb314..edab2cdf1e7e9 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java @@ -90,7 +90,7 @@ public void setPassword(String password) { this.password = password; } - public boolean waitFor(long durationInMs) throws GeneralSecurityException, InterruptedException, IOException { + public boolean wait(int durationInMs) throws GeneralSecurityException, InterruptedException, IOException { final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); final long sleep = Long.max(durationInMs / 10, 100); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java index b9442b28e1591..5c9f45cbe092f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java @@ -13,9 +13,6 @@ import java.nio.file.Files; import java.nio.file.Path; -/** - * A {@link DistributionResolver} for resolving locally built distributions for the current version of Elasticsearch. 
- */ public class LocalDistributionResolver implements DistributionResolver { private final DistributionResolver delegate; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java deleted file mode 100644 index 12654be310ef8..0000000000000 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.test.cluster.local.distribution; - -import org.elasticsearch.test.cluster.util.Version; - -import java.nio.file.Files; -import java.nio.file.Path; - -/** - * A {@link DistributionResolver} for resolving previously released distributions of Elasticsearch. - */ -public class ReleasedDistributionResolver implements DistributionResolver { - private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; - - @Override - public DistributionDescriptor resolve(Version version, DistributionType type) { - String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); - - if (distributionPath == null) { - String taskPath = System.getProperty("tests.task"); - String project = taskPath.substring(0, taskPath.lastIndexOf(':')); - String taskName = taskPath.substring(taskPath.lastIndexOf(':') + 1); - - throw new IllegalStateException( - "Cannot locate Elasticsearch distribution. 
Ensure you've added the following to the build script for project '" - + project - + "':\n\n" - + "tasks.named('" - + taskName - + "') {\n" - + " usesBwcDistribution(" - + version - + ")\n" - + "}" - ); - } - - Path distributionDir = Path.of(distributionPath); - if (Files.notExists(distributionDir)) { - throw new IllegalStateException( - "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." - ); - } - - return new DefaultDistributionDescriptor(version, false, distributionDir, DistributionType.DEFAULT); - } -} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java index c6cecf09e9b9d..182dbe66a584d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java @@ -10,36 +10,10 @@ import org.elasticsearch.test.cluster.util.Version; -import java.nio.file.Files; -import java.nio.file.Path; - -/** - * A {@link DistributionResolver} for resolving snapshot versions of Elasticsearch for previous, backwards-compatible versions. 
- */ public class SnapshotDistributionResolver implements DistributionResolver { - private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; - private final DistributionResolver delegate; - - public SnapshotDistributionResolver(DistributionResolver delegate) { - this.delegate = delegate; - } - @Override public DistributionDescriptor resolve(Version version, DistributionType type) { - String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); - - if (distributionPath != null) { - Path distributionDir = Path.of(distributionPath); - if (Files.notExists(distributionDir)) { - throw new IllegalStateException( - "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." - ); - } - - // Snapshot distributions are never release builds and always use the default distribution - return new DefaultDistributionDescriptor(version, true, distributionDir, DistributionType.DEFAULT); - } - - return delegate.resolve(version, type); + // Not yet implemented + throw new UnsupportedOperationException("Cannot resolve distribution for version " + version); } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle index d9539bb668b4b..429b29bbc9fdb 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle @@ -1,21 +1,104 @@ +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - javaRestTestImplementation(testArtifact(project(xpackModule('core')))) - javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) - javaRestTestImplementation project(':x-pack:qa') + testImplementation(testArtifact(project(xpackModule('core')))) + testImplementation(testArtifact(project(":qa:full-cluster-restart"))) + testImplementation project(':x-pack:qa') +} + +tasks.named("forbiddenPatterns") { + exclude '**/system_key' +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" + +tasks.register("copyTestNodeKeyMaterial", Copy) { + from project(':x-pack:plugin:core') + .files( + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' + ) + into outputDir } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { - usesBwcDistribution(bwcVersion) - systemProperty("tests.old_cluster_version", bwcVersion) - } + def baseCluster = testClusters.register(baseName) { + testDistribution = "DEFAULT" + if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When testing older versions we have to first upgrade to 7.last + versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] + } else { + versions = [bwcVersion.toString(), project.version] + } + numberOfNodes = 2 + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + user username: "test_user", password: "x-pack-test-password" + + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '60m' + setting 'xpack.security.enabled', 'true' + 
setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + + extraConfigFile 'testnode.pem', file("${outputDir}/testnode.pem") + extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") + + keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + + setting 'xpack.security.authc.api_key.enabled', 'true' + + requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + } + + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { + mustRunAfter("precommit") + useCluster baseCluster + dependsOn "copyTestNodeKeyMaterial" + doFirst { + delete("${buildDir}/cluster/shared/repo/${baseName}") + } + systemProperty 'tests.is_old_cluster', 'true' + } + + tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { + mustRunAfter("precommit") + useCluster baseCluster + dependsOn "${baseName}#oldClusterTest" + doFirst { + baseCluster.get().goToNextVersion() + if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
+ baseCluster.get().goToNextVersion() + } + } + systemProperty 'tests.is_old_cluster', 'false' + } + + String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") + tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { + it.systemProperty 'tests.old_cluster_version', oldVersion + it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + it.nonInputProperties.systemProperty('tests.clustername', baseName) + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn "${baseName}#upgradedClusterTest" + } + } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 65% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 07ed594770649..df6e3ed6b9388 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,17 +13,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import 
org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.FeatureFlag; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; -import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.BeforeClass; -import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -42,37 +33,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .version(getOldClusterTestVersion()) - .nodes(2) - // some tests rely on the translog not being flushed - .setting("indices.memory.shard_inactive_time", "60m") - .setting("xpack.security.enabled", "true") - .setting("xpack.security.transport.ssl.enabled", "true") - .setting("xpack.security.transport.ssl.key", "testnode.pem") - .setting("xpack.security.transport.ssl.certificate", "testnode.crt") - .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.watcher.encrypt_sensitive_data", "true") - .setting("xpack.security.authc.api_key.enabled", "true") - .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) - .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) - 
.keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") - .feature(FeatureFlag.TIME_SERIES_MODE) - .build(); - - public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Override - protected ElasticsearchCluster getUpgradeCluster() { - return cluster; - } +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { @Override protected Settings restClientSettings() { @@ -86,13 +47,10 @@ protected Settings restClientSettings() { .build(); } - @BeforeClass - public static void checkClusterVersion() { - assumeTrue("no shutdown in versions before " + Version.V_7_15_0, getOldClusterVersion().onOrAfter(Version.V_7_15_0)); - } - @SuppressWarnings("unchecked") public void testNodeShutdown() throws Exception { + assumeTrue("no shutdown in versions before " + Version.V_7_15_0, getOldClusterVersion().onOrAfter(Version.V_7_15_0)); + if (isRunningAgainstOldCluster()) { final Request getNodesReq = new Request("GET", "_nodes"); final Response getNodesResp = adminClient().performRequest(getNodesReq); @@ -106,7 +64,7 @@ public void testNodeShutdown() throws Exception { // Use the types available from as early as possible final String type = randomFrom("restart", "remove"); putBody.field("type", type); - putBody.field("reason", getRootTestName()); + putBody.field("reason", this.getTestName()); } putBody.endObject(); putShutdownRequest.setJsonEntity(Strings.toString(putBody)); @@ -123,7 +81,7 @@ public void testNodeShutdown() throws Exception { assertThat("there should be exactly one shutdown registered", shutdowns, hasSize(1)); final Map shutdown = shutdowns.get(0); assertThat(shutdown.get("node_id"), notNullValue()); // Since we randomly determine the node ID, we can't check it - assertThat(shutdown.get("reason"), equalTo(getRootTestName())); + assertThat(shutdown.get("reason"), equalTo(this.getTestName())); assertThat( (String) shutdown.get("status"), anyOf( diff --git 
a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key similarity index 100% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index d9539bb668b4b..3923d439d394d 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -1,21 +1,110 @@ +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - javaRestTestImplementation(testArtifact(project(xpackModule('core')))) - javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) - javaRestTestImplementation project(':x-pack:qa') + testImplementation(testArtifact(project(xpackModule('core')))) + testImplementation(testArtifact(project(":qa:full-cluster-restart"))) + testImplementation project(':x-pack:qa') +} + +tasks.named("forbiddenPatterns") { + exclude '**/system_key' +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" + +tasks.register("copyTestNodeKeyMaterial", Copy) { + from project(':x-pack:plugin:core') + .files( + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' + ) + into outputDir } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { - usesBwcDistribution(bwcVersion) - systemProperty("tests.old_cluster_version", bwcVersion) - } + def baseCluster = testClusters.register(baseName) { + testDistribution = "DEFAULT" + if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When testing older versions we have to first upgrade to 7.last + versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] + } else { + versions = [bwcVersion.toString(), project.version] + } + numberOfNodes = 2 + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + user username: "test_user", password: "x-pack-test-password" + + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '60m' + setting 'xpack.security.enabled', 'true' + 
setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + + extraConfigFile 'testnode.pem', file("${outputDir}/testnode.pem") + extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") + + keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + + setting 'xpack.security.authc.api_key.enabled', 'true' + + requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + } + + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { + mustRunAfter("precommit") + useCluster baseCluster + dependsOn "copyTestNodeKeyMaterial" + doFirst { + delete("${buildDir}/cluster/shared/repo/${baseName}") + } + systemProperty 'tests.is_old_cluster', 'true' + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + } + + tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { + mustRunAfter("precommit") + useCluster baseCluster + dependsOn "${baseName}#oldClusterTest" + doFirst { + baseCluster.get().goToNextVersion() + if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { + // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
+ baseCluster.get().goToNextVersion() + } + } + systemProperty 'tests.is_old_cluster', 'false' + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + } + + String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") + tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { + it.systemProperty 'tests.old_cluster_version', oldVersion + it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + it.nonInputProperties.systemProperty('tests.clustername', baseName) + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn "${baseName}#upgradedClusterTest" + } + } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java deleted file mode 100644 index 0bc9101301a54..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.FeatureFlag; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; -import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; -import org.junit.ClassRule; - -public abstract class AbstractXpackFullClusterRestartTestCase extends ParameterizedFullClusterRestartTestCase { - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .version(getOldClusterTestVersion()) - .nodes(2) - // some tests rely on the translog not being flushed - .setting("indices.memory.shard_inactive_time", "60m") - .setting("xpack.security.enabled", "true") - .setting("xpack.security.transport.ssl.enabled", "true") - .setting("xpack.security.transport.ssl.key", "testnode.pem") - .setting("xpack.security.transport.ssl.certificate", "testnode.crt") - .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.watcher.encrypt_sensitive_data", "true") - .setting("xpack.security.authc.api_key.enabled", "true") - .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) - .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) - .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") - .feature(FeatureFlag.TIME_SERIES_MODE) - .build(); - - public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Override - protected ElasticsearchCluster getUpgradeCluster() { - return cluster; - } -} diff 
--git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java deleted file mode 100644 index dcdc127079637..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.restart; - -import com.carrotsearch.randomizedtesting.annotations.Name; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClusterRestartIT; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class CoreFullClusterRestartIT extends FullClusterRestartIT { - - static { - clusterConfig = c -> c.setting("xpack.security.enabled", "true") - .setting("xpack.security.transport.ssl.enabled", "true") - .setting("xpack.security.transport.ssl.key", "testnode.pem") - .setting("xpack.security.transport.ssl.certificate", "testnode.crt") - .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.watcher.encrypt_sensitive_data", "true") - .setting("xpack.security.authc.api_key.enabled", "true") - .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) - .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - 
.keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) - .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); - } - - public CoreFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java deleted file mode 100644 index 563cde322b725..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.cluster.util.resource.Resource; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { - - static { - clusterConfig = c -> c.setting("xpack.security.enabled", "true") - .setting("xpack.security.transport.ssl.enabled", "true") - .setting("xpack.security.transport.ssl.key", "testnode.pem") - .setting("xpack.security.transport.ssl.certificate", "testnode.crt") - .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.watcher.encrypt_sensitive_data", "true") - .setting("xpack.security.authc.api_key.enabled", "true") - .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) - .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) - .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); - } - - public QueryBuilderBWCIT(FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } -} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java new file mode 100644 index 
0000000000000..e06cb12f747a7 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.upgrades.FullClusterRestartIT; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class CoreFullClusterRestartIT extends FullClusterRestartIT { + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index ab48825ed983a..42c551b16655b 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.apache.http.entity.ContentType; 
import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; @@ -27,7 +25,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -63,15 +61,11 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; - public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -109,7 +103,12 @@ public void testSecurityNativeRealm() throws Exception { } else { waitForYellow(".security"); final Request getSettingsRequest = new Request("GET", "/.security/_settings/index.format"); - getSettingsRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); + getSettingsRequest.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7], but in a future major " + + "version, direct access to system indices will be prevented by default" + ) + ); Response settingsResponse = client().performRequest(getSettingsRequest); Map settingsResponseMap = entityAsMap(settingsResponse); logger.info("settings response map {}", settingsResponseMap); @@ -391,7 +390,12 @@ public void 
testApiKeySuperuser() throws IOException { "doc_type": "foo" }"""); if (getOldClusterVersion().onOrAfter(Version.V_7_10_0)) { - indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); + indexRequest.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7], but in a future major " + + "version, direct access to system indices will be prevented by default" + ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) + ); } else { indexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", apiKeyAuthHeader)); } @@ -405,7 +409,12 @@ public void testApiKeySuperuser() throws IOException { // read is ok final Request searchRequest = new Request("GET", ".security/_search"); - searchRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); + searchRequest.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7], but in a future major " + + "version, direct access to system indices will be prevented by default" + ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) + ); assertOK(client().performRequest(searchRequest)); // write must not be allowed @@ -414,7 +423,12 @@ public void testApiKeySuperuser() throws IOException { { "doc_type": "foo" }"""); - indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); + indexRequest.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7], but in a future major " + + "version, direct access to system indices will be prevented by default" + ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) + ); final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(indexRequest)); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); assertThat(e.getMessage(), containsString("is 
unauthorized")); @@ -981,17 +995,4 @@ private static void createComposableTemplate(RestClient client, String templateN createIndexTemplateRequest.setEntity(templateJSON); client.performRequest(createIndexTemplateRequest); } - - private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { - return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler( - w -> w.size() > 0 - && w.contains( - "this request accesses system indices: [" - + index - + "], but in a future major " - + "version, direct access to system indices will be prevented by default" - ) == false - ); - } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java similarity index 97% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 25a14c47e52c7..4d1a5dfb75ab7 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -17,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import 
org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -34,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { +public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created static final String BASE_64_ENCODED_MODEL = @@ -65,10 +63,6 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClus RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } - public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - @Before public void setLogging() throws IOException { Request loggingSettings = new Request("PUT", "_cluster/settings"); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java similarity index 94% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index e4ab3957f2627..bfc078ffe9206 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.restart; -import 
com.carrotsearch.randomizedtesting.annotations.Name; - import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,7 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -31,15 +29,11 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { +public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractFullClusterRestartTestCase { private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; - public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java rename to 
x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index aeb3dad547946..274fa7ee40fce 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -18,7 +16,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -40,7 +38,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { +public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRestartTestCase { private static final String JOB_ID = "ml-hidden-indices-old-cluster-job"; private static final List, String>> EXPECTED_INDEX_ALIAS_PAIRS = List.of( @@ -51,10 +49,6 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullCluste Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); - public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - @Override protected Settings restClientSettings() { String token = "Basic " + 
Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 2bbda9123ae34..61ce6f7827e2a 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -19,7 +17,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -37,17 +35,13 @@ import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.is; -public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { +public class MlMigrationFullClusterRestartIT extends 
AbstractFullClusterRestartTestCase { private static final String OLD_CLUSTER_OPEN_JOB_ID = "migration-old-cluster-open-job"; private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = "migration-old-cluster-started-datafeed"; private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; - public MlMigrationFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java new file mode 100644 index 0000000000000..cffc6881df645 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java similarity index 89% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java rename to x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index 043b3f49a8825..ea926e964360d 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.restart; -import com.carrotsearch.randomizedtesting.annotations.Name; - import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -16,7 +14,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ 
-25,11 +23,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; -public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { - - public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } +public class WatcherMappingUpdateIT extends AbstractFullClusterRestartTestCase { @Override protected Settings restClientSettings() { @@ -97,7 +91,7 @@ private void assertNoMappingVersion(String index) throws Exception { private RequestOptions.Builder getWarningHandlerOptions(String index) { return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false); + .setWarningsHandler(w -> w.contains(getWatcherSystemIndexWarning(index)) == false || w.size() != 1); } private String getWatcherSystemIndexWarning(String index) { diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json rename to x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json rename to x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json diff --git 
a/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json rename to x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json rename to x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key b/x-pack/qa/full-cluster-restart/src/test/resources/system_key similarity index 100% rename from x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key rename to x-pack/qa/full-cluster-restart/src/test/resources/system_key From df09c971fe6b301c48189d321cc232dddd319b37 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 10:29:52 +0000 Subject: [PATCH 48/63] Misc improvements to TBbNA tests (#93435) Similar to #92983, this commit reworks the tests in `TransportBroadcastByNodeActionTests` to use the `ReachabilityChecker` to check that things are released on cancellation, and adds a test showing the cancellation behaviour of the shard-level operations. 
--- .../TransportBroadcastByNodeActionTests.java | 211 ++++++++++++------ 1 file changed, 146 insertions(+), 65 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index a3476af7de933..df4a26260f0a2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -37,7 +37,9 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -50,6 +52,7 @@ import org.elasticsearch.tasks.TaskCancelHelper; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ReachabilityChecker; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -63,6 +66,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -71,6 +75,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -79,6 +84,7 @@ import static 
org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.object.HasToString.hasToString; public class TransportBroadcastByNodeActionTests extends ESTestCase { @@ -91,9 +97,9 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { private CapturingTransport transport; private TestTransportBroadcastByNodeAction action; + private TransportService transportService; public static class Request extends BroadcastRequest { - public Request(StreamInput in) throws IOException { super(in); } @@ -113,29 +119,36 @@ public Response(int totalShards, int successfulShards, int failedShards, List { + // empty per-shard result, but not a singleton so we can check each instance is released on cancellation + public static class ShardResult implements Writeable { + public ShardResult() {} + + @Override + public void writeTo(StreamOutput out) throws IOException {} + } + + class TestTransportBroadcastByNodeAction extends TransportBroadcastByNodeAction { private final Map shards = new HashMap<>(); - TestTransportBroadcastByNodeAction( - TransportService transportService, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - Writeable.Reader request, - String executor - ) { - super("indices:admin/test", clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor); + TestTransportBroadcastByNodeAction(String actionName) { + super( + actionName, + clusterService, + transportService, + new ActionFilters(Set.of()), + new MyResolver(), + Request::new, + ThreadPool.Names.SAME + ); } @Override - protected EmptyResult readShardResult(StreamInput in) { - return EmptyResult.INSTANCE; + protected ShardResult readShardResult(StreamInput in) { + return new ShardResult(); } @Override - protected ResponseFactory getResponseFactory(Request request, ClusterState 
clusterState) { + protected ResponseFactory getResponseFactory(Request request, ClusterState clusterState) { return (totalShards, successfulShards, failedShards, emptyResults, shardFailures) -> new Response( totalShards, successfulShards, @@ -150,11 +163,11 @@ protected Request readRequestFrom(StreamInput in) throws IOException { } @Override - protected void shardOperation(Request request, ShardRouting shardRouting, Task task, ActionListener listener) { + protected void shardOperation(Request request, ShardRouting shardRouting, Task task, ActionListener listener) { ActionListener.completeWith(listener, () -> { if (rarely()) { shards.put(shardRouting, Boolean.TRUE); - return EmptyResult.INSTANCE; + return new ShardResult(); } else { ElasticsearchException e = new ElasticsearchException("operation failed"); shards.put(shardRouting, e); @@ -181,6 +194,7 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, Request re public Map getResults() { return shards; } + } static class MyResolver extends IndexNameExpressionResolver { @@ -204,7 +218,7 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - TransportService transportService = transport.createTransportService( + transportService = transport.createTransportService( clusterService.getSettings(), THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR, @@ -215,13 +229,7 @@ public void setUp() throws Exception { transportService.start(); transportService.acceptIncomingRequests(); setClusterState(clusterService); - action = new TestTransportBroadcastByNodeAction( - transportService, - new ActionFilters(new HashSet<>()), - new MyResolver(), - Request::new, - ThreadPool.Names.SAME - ); + action = new TestTransportBroadcastByNodeAction("indices:admin/test"); } @After @@ -348,16 +356,15 @@ public void testNoShardOperationsExecutedIfTaskCancelled() throws Exception { shards.add(shard); } } - final 
TransportBroadcastByNodeAction< - Request, - Response, - TransportBroadcastByNodeAction.EmptyResult>.BroadcastByNodeTransportRequestHandler handler = - action.new BroadcastByNodeTransportRequestHandler(); + final TransportBroadcastByNodeAction.BroadcastByNodeTransportRequestHandler handler = + action.new BroadcastByNodeTransportRequestHandler(); final PlainActionFuture future = PlainActionFuture.newFuture(); TestTransportChannel channel = new TestTransportChannel(future); - handler.messageReceived(action.new NodeRequest(new Request(), new ArrayList<>(shards), nodeId), channel, cancelledTask()); + final CancellableTask cancellableTask = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); + TaskCancelHelper.cancel(cancellableTask, "simulated"); + handler.messageReceived(action.new NodeRequest(new Request(), new ArrayList<>(shards), nodeId), channel, cancellableTask); expectThrows(TaskCancelledException.class, future::actionGet); assertThat(action.getResults(), anEmptyMap()); @@ -410,11 +417,8 @@ public void testOperationExecution() throws Exception { shards.add(shard); } } - final TransportBroadcastByNodeAction< - Request, - Response, - TransportBroadcastByNodeAction.EmptyResult>.BroadcastByNodeTransportRequestHandler handler = - action.new BroadcastByNodeTransportRequestHandler(); + final TransportBroadcastByNodeAction.BroadcastByNodeTransportRequestHandler handler = + action.new BroadcastByNodeTransportRequestHandler(); final PlainActionFuture future = PlainActionFuture.newFuture(); TestTransportChannel channel = new TestTransportChannel(future); @@ -441,13 +445,12 @@ public void testOperationExecution() throws Exception { failedShards++; } } - // check the operation results assertEquals("successful shards", successfulShards, nodeResponse.getSuccessfulShards()); assertEquals("total shards", action.getResults().size(), nodeResponse.getTotalShards()); - assertEquals("failed shards", failedShards, nodeResponse.getExceptions().size()); 
@SuppressWarnings("unchecked") List exceptions = nodeResponse.getExceptions(); + assertEquals("exceptions count", failedShards, exceptions.size()); for (BroadcastShardOperationFailedException exception : exceptions) { assertThat(exception.getMessage(), is("operation indices:admin/test failed")); assertThat(exception.getCause(), hasToString(containsString("operation failed"))); @@ -495,7 +498,7 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept transport.handleRemoteError(requestId, new Exception()); } else { List shards = map.get(entry.getKey()); - List shardResults = new ArrayList<>(); + List shardResults = new ArrayList<>(); for (ShardRouting shard : shards) { totalShards++; if (rarely()) { @@ -503,12 +506,13 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept totalFailedShards++; exceptions.add(new BroadcastShardOperationFailedException(shard.shardId(), "operation indices:admin/test failed")); } else { - shardResults.add(TransportBroadcastByNodeAction.EmptyResult.INSTANCE); + shardResults.add(new ShardResult()); } } totalSuccessfulShards += shardResults.size(); - TransportBroadcastByNodeAction.NodeResponse nodeResponse = - action.new NodeResponse(entry.getKey(), shards.size(), shardResults, exceptions); + TransportBroadcastByNodeAction.NodeResponse nodeResponse = action.new NodeResponse( + entry.getKey(), shards.size(), shardResults, exceptions + ); transport.handleResponse(requestId, nodeResponse); } } @@ -523,33 +527,110 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept assertEquals("accumulated exceptions", totalFailedShards, response.getShardFailures().length); } - public void testNoResultAggregationIfTaskCancelled() { - Request request = new Request(TEST_INDEX); - PlainActionFuture listener = new PlainActionFuture<>(); - final CancellableTask task = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); - 
TransportBroadcastByNodeAction.AsyncAction asyncAction = - action.new AsyncAction(task, request, clusterService.state(), null, listener); - asyncAction.start(); - Map> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear(); - int cancelAt = randomIntBetween(0, Math.max(0, capturedRequests.size() - 2)); - int i = 0; - for (Map.Entry> entry : capturedRequests.entrySet()) { - if (cancelAt == i) { - TaskCancelHelper.cancel(task, "simulated"); + public void testResponsesReleasedOnCancellation() { + final CancellableTask cancellableTask = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); + final PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(cancellableTask, new Request(TEST_INDEX), listener); + + final List capturedRequests = new ArrayList<>( + Arrays.asList(transport.getCapturedRequestsAndClear()) + ); + Randomness.shuffle(capturedRequests); + + final ReachabilityChecker reachabilityChecker = new ReachabilityChecker(); + final Runnable nextRequestProcessor = () -> { + final var capturedRequest = capturedRequests.remove(0); + if (randomBoolean()) { + // transport.handleResponse may de/serialize the response, releasing it early, so send the response straight to the handler + transport.getTransportResponseHandler(capturedRequest.requestId()) + .handleResponse( + action.new NodeResponse( + capturedRequest.node().getId(), 1, List.of(reachabilityChecker.register(new ShardResult())), List.of() + ) + ); + } else { + // handleRemoteError may de/serialize the exception, releasing it early, so just use handleLocalError + transport.handleLocalError( + capturedRequest.requestId(), + reachabilityChecker.register(new ElasticsearchException("simulated")) + ); } - transport.handleRemoteError(entry.getValue().get(0).requestId(), new ElasticsearchException("simulated")); - i++; + }; + + assertThat(capturedRequests.size(), greaterThan(2)); + final var responsesBeforeCancellation = between(1, capturedRequests.size() - 
2); + for (int i = 0; i < responsesBeforeCancellation; i++) { + nextRequestProcessor.run(); + } + + reachabilityChecker.checkReachable(); + TaskCancelHelper.cancel(cancellableTask, "simulated"); + + // responses captured before cancellation are now unreachable + reachabilityChecker.ensureUnreachable(); + + while (capturedRequests.size() > 0) { + // a response sent after cancellation is dropped immediately + assertFalse(listener.isDone()); + nextRequestProcessor.run(); + reachabilityChecker.ensureUnreachable(); } - assertTrue(listener.isDone()); - assertTrue(asyncAction.getNodeResponseTracker().responsesDiscarded()); - expectThrows(ExecutionException.class, TaskCancelledException.class, listener::get); + expectThrows(TaskCancelledException.class, () -> listener.actionGet(10, TimeUnit.SECONDS)); } - private static Task cancelledTask() { - final CancellableTask task = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); - TaskCancelHelper.cancel(task, "simulated"); - return task; + public void testShardLevelOperationsStopOnCancellation() throws Exception { + action = new TestTransportBroadcastByNodeAction("indices:admin/shard_level_test") { + int expectedShardId; + + @Override + protected void shardOperation(Request request, ShardRouting shardRouting, Task task, ActionListener listener) { + // this test runs a node-level operation on three shards, cancelling the task some time during the execution on the second + if (task instanceof CancellableTask cancellableTask) { + assertEquals(expectedShardId++, shardRouting.shardId().id()); + switch (shardRouting.shardId().id()) { + case 0 -> { + assertFalse(cancellableTask.isCancelled()); + listener.onResponse(new ShardResult()); + } + case 1 -> { + assertFalse(cancellableTask.isCancelled()); + TaskCancelHelper.cancel(cancellableTask, "simulated"); + if (randomBoolean()) { + listener.onResponse(new ShardResult()); + } else { + assertTrue(cancellableTask.notifyIfCancelled(listener)); + } + } + default 
-> fail("unexpected shard execution: " + shardRouting); + } + } else { + fail("task was not cancellable"); + } + } + }; + + final PlainActionFuture nodeResponseFuture = new PlainActionFuture<>(); + + action.new BroadcastByNodeTransportRequestHandler().messageReceived( + action.new NodeRequest( + new Request(), IntStream.range(0, 3) + .mapToObj(shardId -> TestShardRouting.newShardRouting(TEST_INDEX, shardId, "node-id", true, ShardRoutingState.STARTED)) + .toList(), "node-id" + ), + new TestTransportChannel(nodeResponseFuture), + new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()) + ); + + assertTrue(nodeResponseFuture.isDone()); + assertEquals( + "task cancelled [simulated]", + expectThrows( + java.util.concurrent.ExecutionException.class, + org.elasticsearch.tasks.TaskCancelledException.class, + nodeResponseFuture::get + ).getMessage() + ); } } From 4494c66a762c45b17c7b71ac7166822d2a0c9e45 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 10:37:19 +0000 Subject: [PATCH 49/63] Fix context leak in list tasks API (#93431) In #90977 we made the list tasks API fully async, but failed to notice that if we waited for a task to complete then we would respond in the thread context of the last-completing task. This commit fixes the problem by restoring the context of the list-tasks task before responding. 
Closes #93428 --- docs/changelog/93431.yaml | 6 + .../admin/cluster/tasks/ListTasksIT.java | 178 +++++++++++++++++- .../tasks/list/TransportListTasksAction.java | 21 ++- 3 files changed, 194 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/93431.yaml diff --git a/docs/changelog/93431.yaml b/docs/changelog/93431.yaml new file mode 100644 index 0000000000000..082a847684459 --- /dev/null +++ b/docs/changelog/93431.yaml @@ -0,0 +1,6 @@ +pr: 93431 +summary: Fix context leak in list tasks API +area: Task Management +type: bug +issues: + - 93428 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index f60ab6204cc21..e3b12b7b9ebb4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -8,15 +8,38 @@ package org.elasticsearch.action.admin.cluster.tasks; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import 
org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -public class ListTasksIT extends ESIntegTestCase { +public class ListTasksIT extends ESSingleNodeTestCase { public void testListTasksFilteredByDescription() { @@ -40,4 +63,155 @@ public void testListTasksValidation() { assertThat(ex.getMessage(), containsString("matching on descriptions is not available when [detailed] is false")); } + + public void testWaitForCompletion() throws Exception { + final var threadPool = getInstanceFromNode(ThreadPool.class); + final var threadContext = threadPool.getThreadContext(); + + final var barrier = new CyclicBarrier(2); + getInstanceFromNode(PluginsService.class).filterPlugins(TestPlugin.class).get(0).barrier = barrier; + + final var testActionFuture = new PlainActionFuture(); + client().execute(TEST_ACTION, new TestRequest(), testActionFuture.map(r -> { + assertThat(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME), hasItem(TestTransportAction.HEADER_VALUE)); + return r; + })); + + barrier.await(10, TimeUnit.SECONDS); + + final var listTasksResponse = client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).get(); + assertThat(listTasksResponse.getNodeFailures(), empty()); + assertEquals(1, listTasksResponse.getTasks().size()); + final var task = listTasksResponse.getTasks().get(0); + assertEquals(TestTransportAction.NAME, task.action()); + + final var listWaitFuture = new PlainActionFuture(); + client().admin() + .cluster() + .prepareListTasks() + .setTargetTaskId(task.taskId()) + 
.setWaitForCompletion(true) + .execute(listWaitFuture.delegateFailure((l, listResult) -> { + assertEquals(1, listResult.getTasks().size()); + assertEquals(task.taskId(), listResult.getTasks().get(0).taskId()); + // the task must now be complete: + client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).execute(l.map(listAfterWaitResult -> { + assertThat(listAfterWaitResult.getTasks(), empty()); + assertThat(listAfterWaitResult.getNodeFailures(), empty()); + assertThat(listAfterWaitResult.getTaskFailures(), empty()); + return null; + })); + // and we must not see its header: + assertNull(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME)); + })); + + // briefly fill up the management pool so that (a) we know the wait has started and (b) we know it's not blocking + flushThreadPool(threadPool, ThreadPool.Names.MANAGEMENT); + + final var getWaitFuture = new PlainActionFuture(); + client().admin() + .cluster() + .prepareGetTask(task.taskId()) + .setWaitForCompletion(true) + .execute(getWaitFuture.delegateFailure((l, getResult) -> { + assertTrue(getResult.getTask().isCompleted()); + assertEquals(task.taskId(), getResult.getTask().getTask().taskId()); + // the task must now be complete: + client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).execute(l.map(listAfterWaitResult -> { + assertThat(listAfterWaitResult.getTasks(), empty()); + assertThat(listAfterWaitResult.getNodeFailures(), empty()); + assertThat(listAfterWaitResult.getTaskFailures(), empty()); + return null; + })); + // and we must not see its header: + assertNull(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME)); + })); + + // briefly fill up the generic pool so that (a) we know the wait has started and (b) we know it's not blocking + // flushThreadPool(threadPool, ThreadPool.Names.GENERIC); // TODO it _is_ blocking right now!!, unmute this in #93375 + + assertFalse(listWaitFuture.isDone()); + 
assertFalse(testActionFuture.isDone()); + barrier.await(10, TimeUnit.SECONDS); + testActionFuture.get(10, TimeUnit.SECONDS); + listWaitFuture.get(10, TimeUnit.SECONDS); + getWaitFuture.get(10, TimeUnit.SECONDS); + } + + private void flushThreadPool(ThreadPool threadPool, String executor) throws InterruptedException, BrokenBarrierException, + TimeoutException { + var maxThreads = threadPool.info(executor).getMax(); + var barrier = new CyclicBarrier(maxThreads + 1); + for (int i = 0; i < maxThreads; i++) { + threadPool.executor(executor).execute(() -> { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + } + barrier.await(10, TimeUnit.SECONDS); + } + + @Override + protected Collection> getPlugins() { + return List.of(TestPlugin.class); + } + + private static final ActionType TEST_ACTION = new ActionType<>( + TestTransportAction.NAME, + in -> ActionResponse.Empty.INSTANCE + ); + + public static class TestPlugin extends Plugin implements ActionPlugin { + volatile CyclicBarrier barrier; + + @Override + public List> getActions() { + return List.of(new ActionHandler<>(TEST_ACTION, TestTransportAction.class)); + } + } + + public static class TestRequest extends ActionRequest { + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class TestTransportAction extends HandledTransportAction { + + static final String NAME = "internal:test/action"; + + static final String HEADER_NAME = "HEADER_NAME"; + static final String HEADER_VALUE = "HEADER_VALUE"; + + private final TestPlugin testPlugin; + private final ThreadPool threadPool; + + @Inject + public TestTransportAction( + TransportService transportService, + ActionFilters actionFilters, + PluginsService pluginsService, + ThreadPool threadPool + ) { + super(NAME, transportService, actionFilters, in -> new TestRequest()); + testPlugin = pluginsService.filterPlugins(TestPlugin.class).get(0); + this.threadPool = 
threadPool; + } + + @Override + protected void doExecute(Task task, TestRequest request, ActionListener listener) { + final var barrier = testPlugin.barrier; + assertNotNull(barrier); + threadPool.generic().execute(ActionRunnable.run(listener, () -> { + barrier.await(10, TimeUnit.SECONDS); + threadPool.getThreadContext().addResponseHeader(HEADER_NAME, HEADER_VALUE); + barrier.await(10, TimeUnit.SECONDS); + })); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index b0495899e5876..5af717dfbb88a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.tasks.TransportTasksAction; @@ -126,19 +127,21 @@ protected void processTasks(ListTasksRequest request, Consumer operation, // No tasks to wait, we can run nodeOperation in the management pool allMatchedTasksRemovedListener.onResponse(null); } else { + final var threadPool = clusterService.threadPool(); future.addListener( new ThreadedActionListener<>( - clusterService.threadPool().executor(ThreadPool.Names.MANAGEMENT), - false, - allMatchedTasksRemovedListener + threadPool.executor(ThreadPool.Names.MANAGEMENT), + new ContextPreservingActionListener<>( + threadPool.getThreadContext().newRestorableContext(false), + allMatchedTasksRemovedListener + ) 
) ); - var cancellable = clusterService.threadPool() - .schedule( - () -> future.onFailure(new ElasticsearchTimeoutException("Timed out waiting for completion of tasks")), - requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), - ThreadPool.Names.SAME - ); + var cancellable = threadPool.schedule( + () -> future.onFailure(new ElasticsearchTimeoutException("Timed out waiting for completion of tasks")), + requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), + ThreadPool.Names.SAME + ); future.addListener(ActionListener.wrap(cancellable::cancel)); } } else { From 049563dde6a4c10dbce9eef41ba545c1a76d95f7 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 2 Feb 2023 12:39:38 +0100 Subject: [PATCH 50/63] Remove MonitoringWithWatcherRestIT.testThatLocalExporterAddsWatches We remove this test because it is flaky and the feature tested here is already tested by monitoring. --- .../MonitoringWithWatcherRestIT.java | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/x-pack/plugin/watcher/qa/with-monitoring/src/javaRestTest/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/plugin/watcher/qa/with-monitoring/src/javaRestTest/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java index e854b524aa72c..d8ccbfd7688fa 100644 --- a/x-pack/plugin/watcher/qa/with-monitoring/src/javaRestTest/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java +++ b/x-pack/plugin/watcher/qa/with-monitoring/src/javaRestTest/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java @@ -44,28 +44,6 @@ public void cleanExporters() throws Exception { deleteAllWatcherData(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/59132") - public void testThatLocalExporterAddsWatches() throws Exception { - String watchId = createMonitoringWatch(); - - Request request = new Request("PUT", "/_cluster/settings"); - request.setJsonEntity( - Strings.toString( - 
jsonBuilder().startObject() - .startObject("persistent") - .field("xpack.monitoring.exporters.my_local_exporter.type", "local") - .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) - .endObject() - .endObject() - ) - ); - adminClient().performRequest(request); - - assertTotalWatchCount(WATCH_IDS.length); - - assertMonitoringWatchHasBeenOverWritten(watchId); - } - private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception { assertBusy(() -> { ObjectPath path = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_watcher/watch/" + watchId))); From 4a556f1719555d689b2fba722b3439bf43f40e8b Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 12:03:51 +0000 Subject: [PATCH 51/63] More investigation into 93271 (#93454) We still don't properly understand why this test is failing, and it doesn't reproduce locally, so this commit adds a little extra logging to capture extra detail from a failure in CI. 
--- qa/rolling-upgrade/build.gradle | 3 +++ .../org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 6aae99b863afb..d3078dd8c9381 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -39,6 +39,9 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> setting 'repositories.url.allowed_urls', 'http://snapshot.test*' setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + setting 'logger.org.elasticsearch.cluster.service.MasterService', 'TRACE' + setting 'logger.org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator', 'TRACE' + setting 'logger.org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders', 'TRACE' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 992ef3d0d97ae..ba6bf68267048 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -42,7 +42,6 @@ public class SnapshotBasedRecoveryIT extends AbstractRollingTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/93271") public void testSnapshotBasedRecovery() throws Exception { final String indexName = "snapshot_based_recovery"; final String repositoryName = "snapshot_based_recovery_repo"; From 030d6d96cd55e91b1a039e7a8fa74df6af2c46ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Thu, 2 Feb 2023 13:52:30 +0100 Subject: [PATCH 52/63] Remove extra logging from 
PrioritizedThrottledTaskRunnerTests (#93456) --- .../PrioritizedThrottledTaskRunnerTests.java | 47 +++++++------------ 1 file changed, 16 insertions(+), 31 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java index 01ecce705856f..05d650f2fe804 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunnerTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -49,16 +47,12 @@ public void tearDown() throws Exception { } static class TestTask extends AbstractRunnable implements Comparable { - private final Logger logger = LogManager.getLogger(TestTask.class); - private final Runnable runnable; private final int priority; - private final String taskDescription; - TestTask(Runnable runnable, int priority, String taskDescription) { + TestTask(Runnable runnable, int priority) { this.runnable = runnable; this.priority = priority; - this.taskDescription = taskDescription; } @Override @@ -68,9 +62,7 @@ public int compareTo(TestTask o) { @Override public void doRun() { - logger.info("--> starting to execute task [{}]", taskDescription); runnable.run(); - logger.info("--> finished task [{}]", taskDescription); } @Override @@ -96,7 +88,7 @@ public void testMultiThreadedEnqueue() throws Exception { throw new AssertionError(e); } executedCountDown.countDown(); - }, getRandomPriority(), "testMultiThreadedEnqueue-" + taskId)); + }, getRandomPriority())); assertThat(taskRunner.runningTasks(), 
lessThanOrEqualTo(maxTasks)); }).start(); } @@ -117,7 +109,7 @@ public void testTasksRunInOrder() throws Exception { taskRunner.enqueueTask(new TestTask(() -> { awaitBarrier(blockBarrier); // notify main thread that the runner is blocked awaitBarrier(blockBarrier); // wait for main thread to finish enqueuing tasks - }, getRandomPriority(), "blocking task")); + }, getRandomPriority())); blockBarrier.await(10, TimeUnit.SECONDS); // wait for blocking task to start executing @@ -135,7 +127,7 @@ public void testTasksRunInOrder() throws Exception { taskRunner.enqueueTask(new TestTask(() -> { executedPriorities.add(priority); executedCountDown.countDown(); - }, priority, "concurrent enqueued tasks - " + taskId)); + }, priority)); awaitBarrier(enqueuedBarrier); // notify main thread that the task is enqueued }).start(); } @@ -171,7 +163,7 @@ public void testEnqueueSpawnsNewTasksUpToMax() throws Exception { throw new RuntimeException(e); } executedCountDown.countDown(); - }, getRandomPriority(), "testEnqueueSpawnsNewTasksUpToMax-" + taskId)); + }, getRandomPriority())); assertThat(taskRunner.runningTasks(), equalTo(i + 1)); } // Enqueueing one or more new tasks would create only one new running task @@ -184,7 +176,7 @@ public void testEnqueueSpawnsNewTasksUpToMax() throws Exception { throw new RuntimeException(e); } executedCountDown.countDown(); - }, getRandomPriority(), "testEnqueueSpawnsNewTasksUpToMax-" + taskId)); + }, getRandomPriority())); assertThat(taskRunner.runningTasks(), equalTo(maxTasks)); } assertThat(taskRunner.queueSize(), equalTo(newTasks - 1)); @@ -209,19 +201,17 @@ public void testFailsTasksOnRejectionOrShutdown() throws Exception { try { while (true) { assertTrue(permits.tryAcquire(10, TimeUnit.SECONDS)); - taskRunner.enqueueTask( - new TestTask(taskCompleted::countDown, getRandomPriority(), "testFailsTasksOnRejectionOrShutdown") { - @Override - public void onRejection(Exception e) { - rejectionCountDown.countDown(); - } + taskRunner.enqueueTask(new 
TestTask(taskCompleted::countDown, getRandomPriority()) { + @Override + public void onRejection(Exception e) { + rejectionCountDown.countDown(); + } - @Override - public void onAfter() { - permits.release(); - } + @Override + public void onAfter() { + permits.release(); } - ); + }); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -244,14 +234,9 @@ private int getRandomPriority() { } private void assertNoRunningTasks(PrioritizedThrottledTaskRunner taskRunner) { - logger.info("--> ensure that there are no running tasks in the executor. Max number of threads [{}]", maxThreads); final var barrier = new CyclicBarrier(maxThreads + 1); for (int i = 0; i < maxThreads; i++) { - executor.execute(() -> { - logger.info("--> await until barrier is released"); - awaitBarrier(barrier); - logger.info("--> the barrier is released"); - }); + executor.execute(() -> { awaitBarrier(barrier); }); } awaitBarrier(barrier); assertThat(taskRunner.runningTasks(), equalTo(0)); From 45056a256604af522ad80c61cfc284d0ceb2f9f5 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 2 Feb 2023 14:32:49 +0100 Subject: [PATCH 53/63] Minor cleanups FrozenIndexInput (#93309) Some random finds while working with this code. We shouldn't use a Consumer instead of a LongConsumer as we never pass `null` to the consumer. Also, way simplified the locking around the Lucene `Bytebuffer b` to simplify the code and technically make it a little faster/less-contenting as well. Plus, made use of modern Java's buffer slicing to simplify the slicing of the Lucene buffer. 
--- .../shared/SharedBlobCacheService.java | 4 +- .../cache/common/CacheFile.java | 4 +- .../store/input/FrozenIndexInput.java | 57 ++++++------------- .../input/MetadataCachingIndexInput.java | 4 +- 4 files changed, 24 insertions(+), 45 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index bb04657848087..2e84e6d85fdd6 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -52,7 +52,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; -import java.util.function.Consumer; +import java.util.function.LongConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -894,7 +894,7 @@ public interface RangeAvailableHandler { @FunctionalInterface public interface RangeMissingHandler { - void fillCacheRange(SharedBytes.IO channel, long channelPos, long relativePos, long length, Consumer progressUpdater) + void fillCacheRange(SharedBytes.IO channel, long channelPos, long relativePos, long length, LongConsumer progressUpdater) throws IOException; } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java index 8c0bc924b819d..9377164dd0952 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFile.java @@ -35,7 +35,7 
@@ import java.util.concurrent.Executor; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; +import java.util.function.LongConsumer; public class CacheFile { @@ -334,7 +334,7 @@ public interface RangeAvailableHandler { @FunctionalInterface public interface RangeMissingHandler { - void fillCacheRange(FileChannel channel, long from, long to, Consumer progressUpdater) throws IOException; + void fillCacheRange(FileChannel channel, long from, long to, LongConsumer progressUpdater) throws IOException; } /** diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index 7f924a9cae35a..735f4e18ad8ee 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -26,9 +26,8 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.util.Locale; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; +import java.util.concurrent.Semaphore; +import java.util.function.LongConsumer; import static org.elasticsearch.blobcache.BlobCacheUtils.toIntBytes; import static org.elasticsearch.core.Strings.format; @@ -113,22 +112,12 @@ protected void readWithoutBlobCache(ByteBuffer b) throws Exception { final int length = b.remaining(); final int originalByteBufPosition = b.position(); - final ReentrantReadWriteLock luceneByteBufLock = new ReentrantReadWriteLock(); - final AtomicBoolean stopAsyncReads = new AtomicBoolean(); - // Runnable that, when called, ensures that async callbacks (such as those 
used by readCacheFile) are not + // Semaphore that, when all permits are acquired, ensures that async callbacks (such as those used by readCacheFile) are not // accessing the byte buffer anymore that was passed to readWithoutBlobCache - // In particular, it's important to call this method before adapting the ByteBuffer's offset - final Runnable preventAsyncBufferChanges = () -> { - luceneByteBufLock.writeLock().lock(); - try { - stopAsyncReads.set(true); - } finally { - luceneByteBufLock.writeLock().unlock(); - } - }; - + // In particular, it's important to acquire all permits before adapting the ByteBuffer's offset + final Semaphore luceneByteBufPermits = new Semaphore(Integer.MAX_VALUE); + boolean bufferWriteLocked = false; logger.trace("readInternal: read [{}-{}] ([{}] bytes) from [{}]", position, position + length, length, this); - try { final ByteRange startRangeToWrite = computeRange(position); final ByteRange endRangeToWrite = computeRange(position + length - 1); @@ -149,8 +138,7 @@ protected void readWithoutBlobCache(ByteBuffer b) throws Exception { len, b, rangeToRead.start(), - luceneByteBufLock, - stopAsyncReads + luceneByteBufPermits ), (channel, channelPos, relativePos, len, progressUpdater) -> { final long startTimeNanos = stats.currentTimeNanos(); @@ -163,12 +151,15 @@ protected void readWithoutBlobCache(ByteBuffer b) throws Exception { SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME ); assert bytesRead == length : bytesRead + " vs " + length; - assert luceneByteBufLock.getReadHoldCount() == 0; + assert luceneByteBufPermits.availablePermits() == Integer.MAX_VALUE; - preventAsyncBufferChanges.run(); + luceneByteBufPermits.acquire(Integer.MAX_VALUE); + bufferWriteLocked = true; b.position(originalByteBufPosition + bytesRead); // mark all bytes as accounted for } finally { - preventAsyncBufferChanges.run(); + if (bufferWriteLocked == false) { + luceneByteBufPermits.acquire(Integer.MAX_VALUE); + } } } @@ -188,8 +179,7 @@ private int readCacheFile( 
long length, final ByteBuffer buffer, long logicalPos, - ReentrantReadWriteLock luceneByteBufLock, - AtomicBoolean stopAsyncReads + Semaphore luceneByteBufPermits ) throws IOException { logger.trace( "{}: reading cached {} logical {} channel {} pos {} length {} (details: {})", @@ -205,21 +195,10 @@ private int readCacheFile( return 0; } final int bytesRead; - if (luceneByteBufLock.readLock().tryLock()) { + if (luceneByteBufPermits.tryAcquire()) { try { - boolean shouldStopReading = stopAsyncReads.get(); - if (shouldStopReading) { - // return fake response - return Math.toIntExact(length); - } // create slice that is positioned to read the given values - final ByteBuffer dup = buffer.duplicate(); - final int newPosition = dup.position() + Math.toIntExact(relativePos); - assert newPosition <= dup.limit() : "newpos " + newPosition + " limit " + dup.limit(); - assert newPosition + length <= buffer.limit() - : "oldpos " + dup.position() + " newpos " + newPosition + " length " + length + " limit " + buffer.limit(); - dup.position(newPosition); - dup.limit(newPosition + Math.toIntExact(length)); + final ByteBuffer dup = buffer.slice(buffer.position() + Math.toIntExact(relativePos), Math.toIntExact(length)); bytesRead = fc.read(dup, channelPos); if (bytesRead == -1) { throw new EOFException( @@ -233,7 +212,7 @@ private int readCacheFile( ); } } finally { - luceneByteBufLock.readLock().unlock(); + luceneByteBufPermits.release(); } } else { // return fake response @@ -263,7 +242,7 @@ private void writeCacheFile( final long fileChannelPos, final long relativePos, final long length, - final Consumer progressUpdater, + final LongConsumer progressUpdater, final long startTimeNanos ) throws IOException { assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java 
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java index a599da2c073c9..9191662a334a2 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java @@ -41,7 +41,7 @@ import java.util.Objects; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; +import java.util.function.LongConsumer; import static org.elasticsearch.blobcache.BlobCacheUtils.toIntBytes; import static org.elasticsearch.core.Strings.format; @@ -287,7 +287,7 @@ protected int readCacheFile(final FileChannel fc, final long position, final Byt return bytesRead; } - protected void writeCacheFile(final FileChannel fc, final long start, final long end, final Consumer progressUpdater) + protected void writeCacheFile(final FileChannel fc, final long start, final long end, final LongConsumer progressUpdater) throws IOException { assert assertFileChannelOpen(fc); assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); From dd0cd4f1fcf029c4da98216468bf4bc92bfe44ab Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Thu, 2 Feb 2023 15:03:11 +0100 Subject: [PATCH 54/63] Make password long enough for FIPS mode (#93450) In FIPS mode, passwords require a minimal length. This PR adjusts a test to adhere to this. 
Fixes: #93449 --- .../java/org/elasticsearch/server/cli/ServerCliTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java index a42a5aad3b411..f420834f84f3b 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java @@ -331,11 +331,11 @@ public void testSecureSettingsLoaderChoice() throws Exception { public void testSecureSettingsLoaderWithPassword() throws Exception { var loader = setupMockKeystoreLoader(); - assertKeystorePassword("aaa"); + assertKeystorePassword("aaaaaaaaaaaaaaaaaa"); assertTrue(loader.loaded); assertTrue(loader.bootstrapped); // the password we read should match what we passed in - assertEquals("aaa", loader.password); + assertEquals("aaaaaaaaaaaaaaaaaa", loader.password); // after the command the secrets password is closed assertEquals( "SecureString has already been closed", From 4b9a1b9b453365750a59f05b8e8a7a5ec73d1980 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 2 Feb 2023 16:13:01 +0100 Subject: [PATCH 55/63] Expose per node counts (#93439) --- .../cluster/get-desired-balance.asciidoc | 79 +++++++++++-------- .../30_desired_balance.yml | 42 +++++----- .../test/cluster.desired_balance/10_basic.yml | 46 ++++++----- .../allocation/DesiredBalanceResponse.java | 12 +-- .../allocator/ClusterBalanceStats.java | 70 +++++++++++----- .../DesiredBalanceResponseTests.java | 60 +++++++++----- ...TransportGetDesiredBalanceActionTests.java | 7 +- .../allocator/ClusterBalanceStatsTests.java | 68 ++++++++++++---- 8 files changed, 251 insertions(+), 133 deletions(-) diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 
4c0b3e9850774..03bb9727089fb 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -33,53 +33,70 @@ The API returns the following result: "reconciliation_time_in_millis": 0 }, "cluster_balance_stats" : { - { + "tiers": { "data_hot" : { - "total_shard_size" : { - "total" : 36.0, - "min" : 10.0, - "max" : 16.0, - "average" : 12.0, - "std_dev" : 2.8284271247461903 + "shard_count" : { + "total" : 7.0, + "min" : 2.0, + "max" : 3.0, + "average" : 2.3333333333333335, + "std_dev" : 0.4714045207910317 }, - "total_write_load" : { + "forecast_write_load" : { "total" : 21.0, "min" : 6.0, "max" : 8.5, "average" : 7.0, "std_dev" : 1.0801234497346435 }, - "shard_count" : { - "total" : 7.0, - "min" : 2.0, - "max" : 3.0, - "average" : 2.3333333333333335, - "std_dev" : 0.4714045207910317 + "forecast_disk_usage" : { + "total" : 36.0, + "min" : 10.0, + "max" : 16.0, + "average" : 12.0, + "std_dev" : 2.8284271247461903 } }, "data_warm" : { - "total_shard_size" : { - "total" : 42.0, - "min" : 12.0, - "max" : 18.0, - "average" : 14.0, - "std_dev" : 2.8284271247461903 + "shard_count" : { + "total" : 3.0, + "min" : 1.0, + "max" : 1.0, + "average" : 1.0, + "std_dev" : 0.0 }, - "total_write_load" : { + "forecast_write_load" : { "total" : 0.0, "min" : 0.0, "max" : 0.0, "average" : 0.0, "std_dev" : 0.0 }, - "shard_count" : { - "total" : 3.0, - "min" : 1.0, - "max" : 1.0, - "average" : 1.0, - "std_dev" : 0.0 + "forecast_disk_usage" : { + "total" : 42.0, + "min" : 12.0, + "max" : 18.0, + "average" : 14.0, + "std_dev" : 2.8284271247461903 } } + }, + "nodes": { + "node-1": { + "shard_count": 10, + "forecast_write_load": 8.5, + "forecast_disk_usage_bytes": 498435 + }, + "node-2": { + "shard_count": 15, + "forecast_write_load": 3.25, + "forecast_disk_usage_bytes": 384935 + }, + "node-3": { + "shard_count": 12, + "forecast_write_load": 6.0, + "forecast_disk_usage_bytes": 648766 + } } }, "routing_table": { @@ -95,8 +112,8 @@ The 
API returns the following result: "relocating_node_is_desired": false, "shard_id": 0, "index": "test", - "forecasted_write_load": 8.0, - "forecasted_shard_size_in_bytes": 1024 + "forecast_write_load": 8.0, + "forecast_shard_size_in_bytes": 1024 } ], "desired": { @@ -119,8 +136,8 @@ The API returns the following result: "relocating_node_is_desired": false, "shard_id": 1, "index": "test", - "forecasted_write_load": null, - "forecasted_shard_size_in_bytes": null + "forecast_write_load": null, + "forecast_shard_size_in_bytes": null } ], "desired": { diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml index 061f3f8daa754..c7841a9ff099f 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml @@ -75,21 +75,27 @@ setup: _internal.get_desired_balance: { } - is_true: 'cluster_balance_stats' - - is_true: 'cluster_balance_stats.data_content.total_shard_size' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.total' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.min' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.max' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.average' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.std_dev' - - is_true: 'cluster_balance_stats.data_content.total_write_load' - - is_true: 'cluster_balance_stats.data_content.total_write_load.total' - - is_true: 'cluster_balance_stats.data_content.total_write_load.min' - - is_true: 'cluster_balance_stats.data_content.total_write_load.max' - - is_true: 'cluster_balance_stats.data_content.total_write_load.average' - - is_true: 
'cluster_balance_stats.data_content.total_write_load.std_dev' - - is_true: 'cluster_balance_stats.data_content.shard_count' - - is_true: 'cluster_balance_stats.data_content.shard_count.total' - - is_true: 'cluster_balance_stats.data_content.shard_count.min' - - is_true: 'cluster_balance_stats.data_content.shard_count.max' - - is_true: 'cluster_balance_stats.data_content.shard_count.average' - - is_true: 'cluster_balance_stats.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.total' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.min' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.max' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.average' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.nodes' + - is_true: 
'cluster_balance_stats.nodes.test-cluster-0' + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.shard_count' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_write_load': 0.0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_disk_usage_bytes' : 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 45e81e4ba103e..15dc9853ff50f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -30,24 +30,30 @@ setup: _internal.get_desired_balance: { } - is_true: 'cluster_balance_stats' - - is_true: 'cluster_balance_stats.data_content.total_shard_size' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.total' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.min' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.max' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.average' - - is_true: 'cluster_balance_stats.data_content.total_shard_size.std_dev' - - is_true: 'cluster_balance_stats.data_content.total_write_load' - - is_true: 'cluster_balance_stats.data_content.total_write_load.total' - - is_true: 'cluster_balance_stats.data_content.total_write_load.min' - - is_true: 'cluster_balance_stats.data_content.total_write_load.max' - - is_true: 'cluster_balance_stats.data_content.total_write_load.average' - - is_true: 'cluster_balance_stats.data_content.total_write_load.std_dev' - - is_true: 'cluster_balance_stats.data_content.shard_count' - - is_true: 'cluster_balance_stats.data_content.shard_count.total' - - is_true: 'cluster_balance_stats.data_content.shard_count.min' - - is_true: 'cluster_balance_stats.data_content.shard_count.max' - - is_true: 
'cluster_balance_stats.data_content.shard_count.average' - - is_true: 'cluster_balance_stats.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.total' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.min' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.max' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.average' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.nodes' + - is_true: 'cluster_balance_stats.nodes.test-cluster-0' + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.shard_count' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_write_load': 0.0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_disk_usage_bytes' : 0 } --- "Test get desired balance for single shard": @@ -81,8 +87,8 @@ setup: 
- is_true: 'routing_table.test.0.current.0.node_is_desired' - is_false: 'routing_table.test.0.current.0.relocating_node' - is_false: 'routing_table.test.0.current.0.relocating_node_is_desired' - - is_false: 'routing_table.test.0.current.0.forecasted_write_load' - - is_false: 'routing_table.test.0.current.0.forecasted_shard_size_in_bytes' + - is_false: 'routing_table.test.0.current.0.forecast_write_load' + - is_false: 'routing_table.test.0.current.0.forecast_shard_size_in_bytes' - match: { routing_table.test.0.desired.total: 1 } - gte: { routing_table.test.0.desired.unassigned: 0 } - gte: { routing_table.test.0.desired.ignored: 0 } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java index 053ffee1cbb91..2b481d4cac3b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java @@ -174,8 +174,8 @@ public record ShardView( boolean relocatingNodeIsDesired, int shardId, String index, - @Nullable Double forecastedWriteLoad, - @Nullable Long forecastedShardSizeInBytes + @Nullable Double forecastWriteLoad, + @Nullable Long forecastShardSizeInBytes ) implements Writeable, ToXContentObject { private static final TransportVersion ADD_FORECASTS_VERSION = TransportVersion.V_8_7_0; @@ -223,8 +223,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(shardId); out.writeString(index); if (out.getTransportVersion().onOrAfter(ADD_FORECASTS_VERSION)) { - out.writeOptionalDouble(forecastedWriteLoad); - out.writeOptionalLong(forecastedShardSizeInBytes); + out.writeOptionalDouble(forecastWriteLoad); + out.writeOptionalLong(forecastShardSizeInBytes); } else { out.writeMissingWriteable(AllocationId.class); } @@ -241,8 +241,8 @@ public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws .field("relocating_node_is_desired", relocatingNodeIsDesired) .field("shard_id", shardId) .field("index", index) - .field("forecasted_write_load", forecastedWriteLoad) - .field("forecasted_shard_size_in_bytes", forecastedShardSizeInBytes) + .field("forecast_write_load", forecastWriteLoad) + .field("forecast_shard_size_in_bytes", forecastShardSizeInBytes) .endObject(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java index 9feb35df03acb..9bebebcdfe542 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -28,49 +29,58 @@ import java.util.Map; import java.util.function.ToDoubleFunction; -public record ClusterBalanceStats(Map tiers) implements Writeable, ToXContentObject { +public record ClusterBalanceStats(Map tiers, Map nodes) + implements + Writeable, + ToXContentObject { - public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(Map.of()); + public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(Map.of(), Map.of()); public static ClusterBalanceStats createFrom(ClusterState clusterState, WriteLoadForecaster writeLoadForecaster) { - var tierToNodeStats = new HashMap>(); + var tierToNodeStats = new HashMap>(); + var nodes = new HashMap(); for (RoutingNode routingNode : 
clusterState.getRoutingNodes()) { var dataRoles = routingNode.node().getRoles().stream().filter(DiscoveryNodeRole::canContainData).toList(); if (dataRoles.isEmpty()) { continue; } - var nodeStats = NodeStats.createFrom(routingNode, clusterState.metadata(), writeLoadForecaster); + var nodeStats = NodeBalanceStats.createFrom(routingNode, clusterState.metadata(), writeLoadForecaster); + nodes.put(routingNode.node().getName(), nodeStats); for (DiscoveryNodeRole role : dataRoles) { tierToNodeStats.computeIfAbsent(role.roleName(), ignored -> new ArrayList<>()).add(nodeStats); } } - return new ClusterBalanceStats(Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom)); + return new ClusterBalanceStats(Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom), nodes); } public static ClusterBalanceStats readFrom(StreamInput in) throws IOException { - return new ClusterBalanceStats(in.readImmutableMap(StreamInput::readString, TierBalanceStats::readFrom)); + return new ClusterBalanceStats( + in.readImmutableMap(StreamInput::readString, TierBalanceStats::readFrom), + in.readImmutableMap(StreamInput::readString, NodeBalanceStats::readFrom) + ); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(tiers, StreamOutput::writeString, StreamOutput::writeWriteable); + out.writeMap(nodes, StreamOutput::writeString, StreamOutput::writeWriteable); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.map(tiers); + return builder.startObject().field("tiers").map(tiers).field("nodes").map(nodes).endObject(); } - public record TierBalanceStats(MetricStats shardCount, MetricStats totalWriteLoad, MetricStats totalShardSize) + public record TierBalanceStats(MetricStats shardCount, MetricStats forecastWriteLoad, MetricStats forecastShardSize) implements Writeable, ToXContentObject { - private static TierBalanceStats createFrom(List nodes) { + private static 
TierBalanceStats createFrom(List nodes) { return new TierBalanceStats( MetricStats.createFrom(nodes, it -> it.shards), - MetricStats.createFrom(nodes, it -> it.totalWriteLoad), - MetricStats.createFrom(nodes, it -> it.totalShardSize) + MetricStats.createFrom(nodes, it -> it.forecastWriteLoad), + MetricStats.createFrom(nodes, it -> it.forecastShardSize) ); } @@ -81,30 +91,30 @@ public static TierBalanceStats readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { shardCount.writeTo(out); - totalWriteLoad.writeTo(out); - totalShardSize.writeTo(out); + forecastWriteLoad.writeTo(out); + forecastShardSize.writeTo(out); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject() .field("shard_count", shardCount) - .field("total_write_load", totalWriteLoad) - .field("total_shard_size", totalShardSize) + .field("forecast_write_load", forecastWriteLoad) + .field("forecast_disk_usage", forecastShardSize) .endObject(); } } public record MetricStats(double total, double min, double max, double average, double stdDev) implements Writeable, ToXContentObject { - private static MetricStats createFrom(List nodes, ToDoubleFunction metricExtractor) { + private static MetricStats createFrom(List nodes, ToDoubleFunction metricExtractor) { assert nodes.isEmpty() == false : "Stats must be created from non empty nodes"; double total = 0.0; double total2 = 0.0; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; int count = 0; - for (NodeStats node : nodes) { + for (NodeBalanceStats node : nodes) { var metric = metricExtractor.applyAsDouble(node); if (Double.isNaN(metric)) { continue; @@ -145,9 +155,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - private record NodeStats(int shards, double totalWriteLoad, long totalShardSize) { + public record NodeBalanceStats(int shards, double 
forecastWriteLoad, long forecastShardSize) implements Writeable, ToXContentObject { - private static NodeStats createFrom(RoutingNode routingNode, Metadata metadata, WriteLoadForecaster writeLoadForecaster) { + private static NodeBalanceStats createFrom(RoutingNode routingNode, Metadata metadata, WriteLoadForecaster writeLoadForecaster) { double totalWriteLoad = 0.0; long totalShardSize = 0L; @@ -158,7 +168,27 @@ private static NodeStats createFrom(RoutingNode routingNode, Metadata metadata, totalShardSize += indexMetadata.getForecastedShardSizeInBytes().orElse(0); } - return new NodeStats(routingNode.size(), totalWriteLoad, totalShardSize); + return new NodeBalanceStats(routingNode.size(), totalWriteLoad, totalShardSize); + } + + public static NodeBalanceStats readFrom(StreamInput in) throws IOException { + return new NodeBalanceStats(in.readInt(), in.readDouble(), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shards); + out.writeDouble(forecastWriteLoad); + out.writeLong(forecastShardSize); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("shard_count", shards) + .field("forecast_write_load", forecastWriteLoad) + .humanReadableField("forecast_disk_usage_bytes", "forecast_disk_usage", ByteSizeValue.ofBytes(forecastShardSize)) + .endObject(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java index d6907b208b47b..338c949674df8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java @@ -66,7 +66,8 @@ private ClusterBalanceStats randomClusterBalanceStats() { 
DiscoveryNodeRole.DATA_COLD_NODE_ROLE, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE ) - ).stream().map(DiscoveryNodeRole::roleName).collect(toMap(identity(), ignore -> randomTierBalanceStats())) + ).stream().map(DiscoveryNodeRole::roleName).collect(toMap(identity(), ignore -> randomTierBalanceStats())), + randomList(10, () -> randomAlphaOfLength(10)).stream().collect(toMap(identity(), ignore -> randomNodeBalanceStats())) ); } @@ -78,6 +79,14 @@ private ClusterBalanceStats.TierBalanceStats randomTierBalanceStats() { ); } + private ClusterBalanceStats.NodeBalanceStats randomNodeBalanceStats() { + return new ClusterBalanceStats.NodeBalanceStats( + randomIntBetween(0, Integer.MAX_VALUE), + randomDouble(), + randomLongBetween(0, Long.MAX_VALUE) + ); + } + private Map> randomRoutingTable() { Map> routingTable = new HashMap<>(); for (int i = 0; i < randomInt(8); i++) { @@ -166,10 +175,14 @@ public void testToXContent() throws IOException { // cluster balance stats Map clusterBalanceStats = (Map) json.get("cluster_balance_stats"); - assertEquals(clusterBalanceStats.keySet(), response.getClusterBalanceStats().tiers().keySet()); + assertEquals(Set.of("tiers", "nodes"), clusterBalanceStats.keySet()); + + // tier balance stats + Map tiers = (Map) clusterBalanceStats.get("tiers"); + assertEquals(tiers.keySet(), response.getClusterBalanceStats().tiers().keySet()); for (var entry : response.getClusterBalanceStats().tiers().entrySet()) { - Map tierStats = (Map) clusterBalanceStats.get(entry.getKey()); - assertEquals(Set.of("shard_count", "total_write_load", "total_shard_size"), tierStats.keySet()); + Map tierStats = (Map) tiers.get(entry.getKey()); + assertEquals(Set.of("shard_count", "forecast_write_load", "forecast_disk_usage"), tierStats.keySet()); Map shardCountStats = (Map) tierStats.get("shard_count"); assertEquals(Set.of("total", "average", "min", "max", "std_dev"), shardCountStats.keySet()); @@ -179,21 +192,32 @@ public void testToXContent() throws IOException { 
assertEquals(shardCountStats.get("max"), entry.getValue().shardCount().max()); assertEquals(shardCountStats.get("std_dev"), entry.getValue().shardCount().stdDev()); - Map totalWriteLoadStats = (Map) tierStats.get("total_write_load"); + Map totalWriteLoadStats = (Map) tierStats.get("forecast_write_load"); assertEquals(Set.of("total", "average", "min", "max", "std_dev"), totalWriteLoadStats.keySet()); - assertEquals(totalWriteLoadStats.get("total"), entry.getValue().totalWriteLoad().total()); - assertEquals(totalWriteLoadStats.get("average"), entry.getValue().totalWriteLoad().average()); - assertEquals(totalWriteLoadStats.get("min"), entry.getValue().totalWriteLoad().min()); - assertEquals(totalWriteLoadStats.get("max"), entry.getValue().totalWriteLoad().max()); - assertEquals(totalWriteLoadStats.get("std_dev"), entry.getValue().totalWriteLoad().stdDev()); + assertEquals(totalWriteLoadStats.get("total"), entry.getValue().forecastWriteLoad().total()); + assertEquals(totalWriteLoadStats.get("average"), entry.getValue().forecastWriteLoad().average()); + assertEquals(totalWriteLoadStats.get("min"), entry.getValue().forecastWriteLoad().min()); + assertEquals(totalWriteLoadStats.get("max"), entry.getValue().forecastWriteLoad().max()); + assertEquals(totalWriteLoadStats.get("std_dev"), entry.getValue().forecastWriteLoad().stdDev()); - Map totalShardStats = (Map) tierStats.get("total_shard_size"); + Map totalShardStats = (Map) tierStats.get("forecast_disk_usage"); assertEquals(Set.of("total", "average", "min", "max", "std_dev"), totalShardStats.keySet()); - assertEquals(totalShardStats.get("total"), entry.getValue().totalShardSize().total()); - assertEquals(totalShardStats.get("average"), entry.getValue().totalShardSize().average()); - assertEquals(totalShardStats.get("min"), entry.getValue().totalShardSize().min()); - assertEquals(totalShardStats.get("max"), entry.getValue().totalShardSize().max()); - assertEquals(totalShardStats.get("std_dev"), 
entry.getValue().totalShardSize().stdDev()); + assertEquals(totalShardStats.get("total"), entry.getValue().forecastShardSize().total()); + assertEquals(totalShardStats.get("average"), entry.getValue().forecastShardSize().average()); + assertEquals(totalShardStats.get("min"), entry.getValue().forecastShardSize().min()); + assertEquals(totalShardStats.get("max"), entry.getValue().forecastShardSize().max()); + assertEquals(totalShardStats.get("std_dev"), entry.getValue().forecastShardSize().stdDev()); + } + // node balance stats + Map nodes = (Map) clusterBalanceStats.get("nodes"); + assertEquals(nodes.keySet(), response.getClusterBalanceStats().nodes().keySet()); + for (var entry : response.getClusterBalanceStats().nodes().entrySet()) { + Map nodesStats = (Map) nodes.get(entry.getKey()); + assertEquals(Set.of("shard_count", "forecast_write_load", "forecast_disk_usage_bytes"), nodesStats.keySet()); + + assertEquals(nodesStats.get("shard_count"), entry.getValue().shards()); + assertEquals(nodesStats.get("forecast_write_load"), entry.getValue().forecastWriteLoad()); + assertEquals(nodesStats.get("forecast_disk_usage_bytes"), entry.getValue().forecastShardSize()); } // routing table @@ -221,8 +245,8 @@ public void testToXContent() throws IOException { assertEquals(jsonShard.get("relocating_node_is_desired"), shardView.relocatingNodeIsDesired()); assertEquals(jsonShard.get("shard_id"), shardView.shardId()); assertEquals(jsonShard.get("index"), shardView.index()); - assertEquals(jsonShard.get("forecasted_write_load"), shardView.forecastedWriteLoad()); - assertEquals(jsonShard.get("forecasted_shard_size_in_bytes"), shardView.forecastedShardSizeInBytes()); + assertEquals(jsonShard.get("forecast_write_load"), shardView.forecastWriteLoad()); + assertEquals(jsonShard.get("forecast_shard_size_in_bytes"), shardView.forecastShardSizeInBytes()); } Map jsonDesired = (Map) jsonDesiredShard.get("desired"); diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java index c39632a7cc446..2a7dc273c2100 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionTests.java @@ -236,14 +236,11 @@ public void testGetDesiredBalance() throws Exception { assertEquals(shard.index().getName(), shardView.index()); assertEquals(shard.shardId().id(), shardView.shardId()); var forecastedWriteLoad = TEST_WRITE_LOAD_FORECASTER.getForecastedWriteLoad(indexMetadata); - assertEquals( - forecastedWriteLoad.isPresent() ? forecastedWriteLoad.getAsDouble() : null, - shardView.forecastedWriteLoad() - ); + assertEquals(forecastedWriteLoad.isPresent() ? forecastedWriteLoad.getAsDouble() : null, shardView.forecastWriteLoad()); var forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes(); assertEquals( forecastedShardSizeInBytes.isPresent() ? 
forecastedShardSizeInBytes.getAsLong() : null, - shardView.forecastedShardSizeInBytes() + shardView.forecastShardSizeInBytes() ); Set desiredNodeIds = Optional.ofNullable(shardAssignments.get(shard.shardId())) .map(ShardAssignment::nodeIds) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStatsTests.java index e534c0c29bdb0..922b778ecc061 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStatsTests.java @@ -39,9 +39,9 @@ public void testStatsForSingleTierClusterWithNoForecasts() { var clusterState = createClusterState( List.of( - newNode("node-1", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-2", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-3", Set.of(DATA_CONTENT_NODE_ROLE)) + newNode("node-1", "node-1", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-2", "node-2", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-3", "node-3", Set.of(DATA_CONTENT_NODE_ROLE)) ), List.of( startedIndex("index-1", null, null, "node-1", "node-2"), @@ -63,6 +63,14 @@ public void testStatsForSingleTierClusterWithNoForecasts() { new ClusterBalanceStats.MetricStats(0.0, 0.0, 0.0, 0.0, 0.0), new ClusterBalanceStats.MetricStats(0.0, 0.0, 0.0, 0.0, 0.0) ) + ), + Map.of( + "node-1", + new ClusterBalanceStats.NodeBalanceStats(2, 0.0, 0L), + "node-2", + new ClusterBalanceStats.NodeBalanceStats(2, 0.0, 0L), + "node-3", + new ClusterBalanceStats.NodeBalanceStats(2, 0.0, 0L) ) ) ) @@ -73,9 +81,9 @@ public void testStatsForSingleTierClusterWithForecasts() { var clusterState = createClusterState( List.of( - newNode("node-1", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-2", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-3", Set.of(DATA_CONTENT_NODE_ROLE)) + 
newNode("node-1", "node-1", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-2", "node-2", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-3", "node-3", Set.of(DATA_CONTENT_NODE_ROLE)) ), List.of( startedIndex("index-1", 1.5, 8L, "node-1", "node-2"), @@ -97,6 +105,14 @@ public void testStatsForSingleTierClusterWithForecasts() { new ClusterBalanceStats.MetricStats(12.0, 3.5, 4.5, 4.0, stdDev(3.5, 4.0, 4.5)), new ClusterBalanceStats.MetricStats(36.0, 10.0, 14.0, 12.0, stdDev(10.0, 12.0, 14.0)) ) + ), + Map.of( + "node-1", + new ClusterBalanceStats.NodeBalanceStats(2, 3.5, 14L), + "node-2", + new ClusterBalanceStats.NodeBalanceStats(2, 4.0, 12L), + "node-3", + new ClusterBalanceStats.NodeBalanceStats(2, 4.5, 10L) ) ) ) @@ -107,12 +123,12 @@ public void testStatsForHotWarmClusterWithForecasts() { var clusterState = createClusterState( List.of( - newNode("node-hot-1", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), - newNode("node-hot-2", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), - newNode("node-hot-3", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), - newNode("node-warm-1", Set.of(DATA_WARM_NODE_ROLE)), - newNode("node-warm-2", Set.of(DATA_WARM_NODE_ROLE)), - newNode("node-warm-3", Set.of(DATA_WARM_NODE_ROLE)) + newNode("node-hot-1", "node-hot-1", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), + newNode("node-hot-2", "node-hot-2", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), + newNode("node-hot-3", "node-hot-3", Set.of(DATA_CONTENT_NODE_ROLE, DATA_HOT_NODE_ROLE)), + newNode("node-warm-1", "node-warm-1", Set.of(DATA_WARM_NODE_ROLE)), + newNode("node-warm-2", "node-warm-2", Set.of(DATA_WARM_NODE_ROLE)), + newNode("node-warm-3", "node-warm-3", Set.of(DATA_WARM_NODE_ROLE)) ), List.of( startedIndex("index-hot-1", 4.0, 4L, "node-hot-1", "node-hot-2", "node-hot-3"), @@ -148,6 +164,20 @@ public void testStatsForHotWarmClusterWithForecasts() { new ClusterBalanceStats.MetricStats(0.0, 0.0, 0.0, 0.0, 0.0), new 
ClusterBalanceStats.MetricStats(42.0, 12.0, 18.0, 14.0, stdDev(12.0, 12.0, 18.0)) ) + ), + Map.of( + "node-hot-1", + new ClusterBalanceStats.NodeBalanceStats(3, 8.5, 16L), + "node-hot-2", + new ClusterBalanceStats.NodeBalanceStats(2, 6.0, 10L), + "node-hot-3", + new ClusterBalanceStats.NodeBalanceStats(2, 6.5, 10L), + "node-warm-1", + new ClusterBalanceStats.NodeBalanceStats(1, 0.0, 12L), + "node-warm-2", + new ClusterBalanceStats.NodeBalanceStats(1, 0.0, 12L), + "node-warm-3", + new ClusterBalanceStats.NodeBalanceStats(1, 0.0, 18L) ) ) ) @@ -158,9 +188,9 @@ public void testStatsForNoIndicesInTier() { var clusterState = createClusterState( List.of( - newNode("node-1", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-2", Set.of(DATA_CONTENT_NODE_ROLE)), - newNode("node-3", Set.of(DATA_CONTENT_NODE_ROLE)) + newNode("node-1", "node-1", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-2", "node-2", Set.of(DATA_CONTENT_NODE_ROLE)), + newNode("node-3", "node-3", Set.of(DATA_CONTENT_NODE_ROLE)) ), List.of() ); @@ -178,6 +208,14 @@ public void testStatsForNoIndicesInTier() { new ClusterBalanceStats.MetricStats(0.0, 0.0, 0.0, 0.0, 0.0), new ClusterBalanceStats.MetricStats(0.0, 0.0, 0.0, 0.0, 0.0) ) + ), + Map.of( + "node-1", + new ClusterBalanceStats.NodeBalanceStats(0, 0.0, 0L), + "node-2", + new ClusterBalanceStats.NodeBalanceStats(0, 0.0, 0L), + "node-3", + new ClusterBalanceStats.NodeBalanceStats(0, 0.0, 0L) ) ) ) From 02a969e5141aef2f5ceada9271383c213fa642b1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 15:37:59 +0000 Subject: [PATCH 56/63] Even more logging for #93226 (#93465) --- .../elasticsearch/plugins/IndexFoldersDeletionListenerIT.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index b48e8e7bacbcc..f40208c359ffc 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -132,7 +132,9 @@ public void testListenersInvokedWhenIndexIsDeleted() throws Exception { reason = "Debug #93226", value = "org.elasticsearch.indices.cluster.IndicesClusterStateService:DEBUG," + "org.elasticsearch.indices.IndicesService:DEBUG," - + "org.elasticsearch.index.IndexService:DEBUG" + + "org.elasticsearch.index.IndexService:DEBUG," + + "org.elasticsearch.env.NodeEnvironment:DEBUG," + + "org.elasticsearch.cluster.service.MasterService:TRACE" ) public void testListenersInvokedWhenIndexIsRelocated() throws Exception { final String masterNode = internalCluster().startMasterOnlyNode(); From deb587bb1a9835b599ebf41849982363a873365c Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 16:09:16 +0000 Subject: [PATCH 57/63] Describe delete-snapshot tasks (#93466) Delete-snapshot tasks can sometimes be very long-running, but they're hard to monitor because the tasks API does not include a description of the snapshot(s) which are being deleted. This commit adds such a description. 
--- .../org/elasticsearch/snapshots/RepositoriesIT.java | 13 +++++++++++++ .../snapshots/delete/DeleteSnapshotRequest.java | 7 +++++++ 2 files changed, 20 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 771cb87e34f38..5bc943fcc8be2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -274,6 +275,18 @@ public void testRepositoryConflict() throws Exception { logger.info("--> waiting for block to kick in on node [{}]", blockedNode); waitForBlock(blockedNode, repo); + assertTrue( + client().admin() + .cluster() + .prepareListTasks() + .setActions(DeleteSnapshotAction.NAME) + .setDetailed(true) + .get() + .getTasks() + .stream() + .anyMatch(ti -> ("[" + repo + "][" + snapshot1 + "]").equals(ti.description())) + ); + logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e1 = expectThrows( RepositoryConflictException.class, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index ce410efe79f40..99dcb33bc5581 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -10,10 +10,12 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Arrays; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -111,4 +113,9 @@ public DeleteSnapshotRequest snapshots(String... snapshots) { this.snapshots = snapshots; return this; } + + @Override + public String getDescription() { + return Strings.format("[%s]%s", repository, Arrays.toString(snapshots)); + } } From ceb4734f17799180069158500cadf62e4a1b56e3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 2 Feb 2023 16:17:40 +0000 Subject: [PATCH 58/63] Capture thread dump on ShardLockObtainFailedException (#93458) We sometimes see a `ShardLockObtainFailedException` when a shard failed to shut down as fast as we expected, often because a node left and rejoined the cluster. Sometimes this is because it was held open by ongoing scrolls or PITs, but other times it may be because the shutdown process itself is too slow. With this commit we add the ability to capture and log a thread dump at the time of the failure to give us more information about where the shutdown process might be running slowly. 
Relates #93226 --- .../discovery/fault-detection.asciidoc | 36 ++++++++ .../elasticsearch/common/ReferenceDocs.java | 1 + .../elasticsearch/env/NodeEnvironment.java | 57 +++++++++--- .../common/reference-docs-links.json | 1 + .../env/NodeEnvironmentTests.java | 86 ++++++++++++------- 5 files changed, 138 insertions(+), 43 deletions(-) diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc index 7b368065afe11..66889e71d0ab4 100644 --- a/docs/reference/modules/discovery/fault-detection.asciidoc +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -373,3 +373,39 @@ checks are `transport_worker` and `cluster_coordination` threads, for which there should never be a long wait. There may also be evidence of long waits for threads in the {es} logs. Refer to <> for more information. + +===== Diagnosing `ShardLockObtainFailedException` failures + +If a node leaves and rejoins the cluster then {es} will usually shut down and +re-initialize its shards. If the shards do not shut down quickly enough then +{es} may fail to re-initialize them due to a `ShardLockObtainFailedException`. + +To gather more information about the reason for shards shutting down slowly, +configure the following logger: + +[source,yaml] +---- +logger.org.elasticsearch.env.NodeEnvironment: DEBUG +---- + +When this logger is enabled, {es} will attempt to run the +<> API whenever it encounters a +`ShardLockObtainFailedException`. The results are compressed, encoded, and +split into chunks to avoid truncation: + +[source,text] +---- +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 1]: H4sIAAAAAAAA/x... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 2]: p7x3w1hmOQVtuV... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 3]: v7uTboMGDbyOy+... 
+[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 4]: 4tse0RnPnLeDNN... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) +---- + +To reconstruct the output, base64-decode the data and decompress it using +`gzip`. For instance, on Unix-like systems: + +[source,sh] +---- +cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress +---- diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 0fc043210bbda..856ba30d6c4a5 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -33,6 +33,7 @@ public enum ReferenceDocs { DISCOVERY_TROUBLESHOOTING, UNSTABLE_CLUSTER_TROUBLESHOOTING, LAGGING_NODE_TROUBLESHOOTING, + SHARD_LOCK_TROUBLESHOOTING, CONCURRENT_REPOSITORY_WRITERS, ARCHIVE_INDICES, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e4c42af85af47..c21f2c172795b 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -8,6 +8,7 @@ package org.elasticsearch.env; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; @@ -25,8 +26,10 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import 
org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -49,12 +52,15 @@ import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.Closeable; import java.io.IOException; +import java.io.OutputStreamWriter; import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; @@ -926,6 +932,38 @@ public Set lockedShards() { } } + // throttle the hot-threads calls: no more than one per minute + private final Semaphore shardLockHotThreadsPermit = new Semaphore(1); + private long nextShardLockHotThreadsNanos = Long.MIN_VALUE; + + private void maybeLogThreadDump(ShardId shardId, String message) { + if (logger.isDebugEnabled() == false) { + return; + } + + final var prefix = format("hot threads while failing to obtain shard lock for %s: %s", shardId, message); + if (shardLockHotThreadsPermit.tryAcquire()) { + try { + final var now = System.nanoTime(); + if (now <= nextShardLockHotThreadsNanos) { + return; + } + nextShardLockHotThreadsNanos = now + TimeUnit.SECONDS.toNanos(60); + final var hotThreads = new HotThreads().busiestThreads(500).ignoreIdleThreads(false).detect(); + try ( + var stream = ChunkedLoggingStream.create(logger, Level.DEBUG, prefix, ReferenceDocs.SHARD_LOCK_TROUBLESHOOTING); + var writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8) + ) { + writer.write(hotThreads); + } + } catch (Exception e) { + 
logger.error(format("could not obtain %s", prefix), e); + } finally { + shardLockHotThreadsPermit.release(); + } + } + } + private final class InternalShardLock { /* * This class holds a mutex for exclusive access and timeout / wait semantics @@ -975,18 +1013,15 @@ void acquire(long timeoutInMillis, final String details) throws ShardLockObtainF setDetails(details); } else { final Tuple lockDetails = this.lockDetails; // single volatile read - throw new ShardLockObtainFailedException( - shardId, - "obtaining shard lock for [" - + details - + "] timed out after [" - + timeoutInMillis - + "ms], lock already held for [" - + lockDetails.v2() - + "] with age [" - + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lockDetails.v1()) - + "ms]" + final var message = format( + "obtaining shard lock for [%s] timed out after [%dms], lock already held for [%s] with age [%dms]", + details, + timeoutInMillis, + lockDetails.v2(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lockDetails.v1()) ); + maybeLogThreadDump(shardId, message); + throw new ShardLockObtainFailedException(shardId, message); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index a5a62f3da8b7b..cac82799c50a9 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -3,6 +3,7 @@ "DISCOVERY_TROUBLESHOOTING": "discovery-troubleshooting.html", "UNSTABLE_CLUSTER_TROUBLESHOOTING": "cluster-fault-detection.html#cluster-fault-detection-troubleshooting", "LAGGING_NODE_TROUBLESHOOTING": "cluster-fault-detection.html#_diagnosing_lagging_nodes", + "SHARD_LOCK_TROUBLESHOOTING": "cluster-fault-detection.html#_diagnosing_shardlockobtainfailedexception_failures", "CONCURRENT_REPOSITORY_WRITERS": 
"add-repository.html", "ARCHIVE_INDICES": "archive-indices.html" } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 33f7f8a3d24d0..6451c76e99e9e 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.env; +import org.apache.logging.log4j.Level; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -34,7 +35,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.NodeRoles; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -114,44 +117,63 @@ public void testSegmentInfosTracing() { } } + // using a literal string here because the logger is mentioned in the docs, and therefore must only be changed with care + private static final String NODE_ENVIRONMENT_LOGGER_NAME = "org.elasticsearch.env.NodeEnvironment"; + + @TestLogging(reason = "test includes assertions about DEBUG logging", value = NODE_ENVIRONMENT_LOGGER_NAME + ":DEBUG") public void testShardLock() throws Exception { - final NodeEnvironment env = newNodeEnvironment(); + try (var env = newNodeEnvironment()) { - Index index = new Index("foo", "fooUUID"); - ShardLock fooLock = env.shardLock(new ShardId(index, 0), "1"); - assertEquals(new ShardId(index, 0), fooLock.getShardId()); + Index index = new Index("foo", "fooUUID"); - try { - env.shardLock(new ShardId(index, 0), "2"); - fail("shard is locked"); - } catch (ShardLockObtainFailedException ex) { - // expected - } - for (Path path : env.indexPaths(index)) { - 
Files.createDirectories(path.resolve("0")); - Files.createDirectories(path.resolve("1")); - } - try { - env.lockAllForIndex(index, idxSettings, "3", randomIntBetween(0, 10)); - fail("shard 0 is locked"); - } catch (ShardLockObtainFailedException ex) { - // expected - } + var appender = new MockLogAppender(); + appender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "hot threads logging", + NODE_ENVIRONMENT_LOGGER_NAME, + Level.DEBUG, + "hot threads while failing to obtain shard lock for [foo][0]: obtaining shard lock for [2] timed out after *" + ) + ); + appender.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "second attempt should be suppressed due to throttling", + NODE_ENVIRONMENT_LOGGER_NAME, + Level.DEBUG, + "hot threads while failing to obtain shard lock for [foo][0]: obtaining shard lock for [3] timed out after *" + ) + ); - fooLock.close(); - // can lock again? - env.shardLock(new ShardId(index, 0), "4").close(); + try (var ignored = appender.capturing(NodeEnvironment.class); var lock = env.shardLock(new ShardId(index, 0), "1")) { + assertEquals(new ShardId(index, 0), lock.getShardId()); - List locks = env.lockAllForIndex(index, idxSettings, "5", randomIntBetween(0, 10)); - try { - env.shardLock(new ShardId(index, 0), "6"); - fail("shard is locked"); - } catch (ShardLockObtainFailedException ex) { - // expected + expectThrows(ShardLockObtainFailedException.class, () -> env.shardLock(new ShardId(index, 0), "2")); + + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve("0")); + Files.createDirectories(path.resolve("1")); + } + expectThrows( + ShardLockObtainFailedException.class, + () -> env.lockAllForIndex(index, idxSettings, "3", randomIntBetween(0, 10)) + ); + + appender.assertAllExpectationsMatched(); + } + + // can lock again? 
+ env.shardLock(new ShardId(index, 0), "4").close(); + + List locks = new ArrayList<>(); + try { + locks.addAll(env.lockAllForIndex(index, idxSettings, "5", randomIntBetween(0, 10))); + expectThrows(ShardLockObtainFailedException.class, () -> env.shardLock(new ShardId(index, 0), "6")); + } finally { + IOUtils.close(locks); + } + + assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); } - IOUtils.close(locks); - assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); - env.close(); } public void testAvailableIndexFolders() throws Exception { From 7e0885c9d45d3ab97ba16d7855ee4668bd1e5c11 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 2 Feb 2023 10:24:20 -0600 Subject: [PATCH 59/63] Avoiding a NullPointerException in GeoIpDownloaderIT (#93471) --- .../org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 5d02fde827160..63c14ac4df96b 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -311,8 +311,9 @@ public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { .get(); assertTrue(settingsResponse.isAcknowledged()); assertBusy(() -> { - assertNotNull(getTask()); - assertNull(getTask().getState()); + PersistentTasksCustomMetadata.PersistentTask task = getTask(); + assertNotNull(task); + assertNull(task.getState()); putGeoIpPipeline(); // This is to work around the race condition described in #92888 }); putNonGeoipPipeline(pipelineId); From f99744bfe40be125122b8dd76d2560f157a6e455 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 2 Feb 
2023 17:35:59 +0100 Subject: [PATCH 60/63] Bump versions after 7.17.9 release --- .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/TransportVersion.java | 1 + server/src/main/java/org/elasticsearch/Version.java | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index ec9dc446cf627..6dabe613c8276 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -58,6 +58,7 @@ BWC_VERSION: - "7.17.7" - "7.17.8" - "7.17.9" + - "7.17.10" - "8.0.0" - "8.0.1" - "8.1.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 86174e41f7146..c89f884223c01 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.9" + - "7.17.10" - "8.6.2" - "8.7.0" diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index d1ebe20b0302d..8f986efe2721f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -105,6 +105,7 @@ public class TransportVersion implements Comparable { public static final TransportVersion V_7_17_7 = new TransportVersion(7_17_07_99, "108ba576-bb28-42f4-bcbf-845a0ce52560"); public static final TransportVersion V_7_17_8 = new TransportVersion(7_17_08_99, "82a3e70d-cf0e-4efb-ad16-6077ab9fe19f"); public static final TransportVersion V_7_17_9 = new TransportVersion(7_17_09_99, "afd50dda-735f-4eae-9309-3218ffec1b2d"); + public static final TransportVersion V_7_17_10 = new TransportVersion(7_17_10_99, "18ae7108-6f7a-4205-adbb-cfcd6aa6ccc6"); public static final TransportVersion V_8_0_0 = new TransportVersion(8_00_00_99, "c7d2372c-9f01-4a79-8b11-227d862dfe4f"); public static final TransportVersion V_8_0_1 = new TransportVersion(8_00_01_99, "56e044c3-37e5-4f7e-bd38-f493927354ac"); public static final TransportVersion V_8_1_0 = new TransportVersion(8_01_00_99, 
"3dc49dce-9cef-492a-ac8d-3cc79f6b4280"); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index d42152bcd528d..723357d9b013d 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -109,6 +109,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_17_7 = new Version(7_17_07_99, TransportVersion.V_7_17_7, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_7_17_8 = new Version(7_17_08_99, TransportVersion.V_7_17_8, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_7_17_9 = new Version(7_17_09_99, TransportVersion.V_7_17_9, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_10 = new Version(7_17_10_99, TransportVersion.V_7_17_10, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_8_0_0 = new Version(8_00_00_99, TransportVersion.V_8_0_0, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version V_8_0_1 = new Version(8_00_01_99, TransportVersion.V_8_0_1, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version V_8_1_0 = new Version(8_01_00_99, TransportVersion.V_8_1_0, org.apache.lucene.util.Version.LUCENE_9_0_0); From ab15ae672ab2a7fb906c1c16db5d1a5e1a043a6f Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 2 Feb 2023 17:37:19 +0100 Subject: [PATCH 61/63] Prune changelogs after 7.17.9 release --- docs/changelog/92051.yaml | 5 ----- docs/changelog/93354.yaml | 6 ------ 2 files changed, 11 deletions(-) delete mode 100644 docs/changelog/92051.yaml delete mode 100644 docs/changelog/93354.yaml diff --git a/docs/changelog/92051.yaml b/docs/changelog/92051.yaml deleted file mode 100644 index f30c416692f23..0000000000000 --- a/docs/changelog/92051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 92051 -summary: Make field-caps tasks cancellable -area: 
Search -type: enhancement -issues: [] diff --git a/docs/changelog/93354.yaml b/docs/changelog/93354.yaml deleted file mode 100644 index 2ad4d27a069cc..0000000000000 --- a/docs/changelog/93354.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93354 -summary: Bump bundled JDK to Java 19.0.2 -area: Packaging -type: upgrade -issues: - - 93025 From 5c67475651b7b8379e91e8a4716fcdcf5ba36b66 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Thu, 2 Feb 2023 11:46:45 -0500 Subject: [PATCH 62/63] Faster CollectionUtils.ensureNoSelfReferences (#93433) --- .../common/util/CollectionUtils.java | 70 ++++++++------- .../common/util/CollectionUtilsTests.java | 89 +++++++++++++++---- .../common/xcontent/BaseXContentTestCase.java | 3 +- 3 files changed, 111 insertions(+), 51 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 9eb24ea30c861..1664b4690bc74 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -9,7 +9,6 @@ package org.elasticsearch.common.util; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Iterators; import java.nio.file.Path; import java.util.AbstractList; @@ -21,7 +20,6 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.ListIterator; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.RandomAccess; @@ -100,44 +98,56 @@ public static int[] toArray(Collection ints) { * @param messageHint A string to be included in the exception message if the call fails, to provide * more context to the handler of the exception */ - public static void ensureNoSelfReferences(Object value, String messageHint) { - Iterable it = convert(value); - if (it != null) { - ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>()), messageHint); - } + 
public static void ensureNoSelfReferences(final Object value, final String messageHint) { + ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>()), messageHint); } - @SuppressWarnings("unchecked") - private static Iterable convert(Object value) { - if (value == null) { - return null; - } - if (value instanceof Map map) { - return () -> Iterators.concat(map.keySet().iterator(), map.values().iterator()); - } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - return (Iterable) value; + private static void ensureNoSelfReferences(final Object value, final Set ancestors, final String messageHint) { + // these instanceof checks are a bit on the ugly side, but it's important for performance that we have + // a separate dispatch point for Maps versus for Iterables. a polymorphic version of this code would + // be prettier, but it would also likely be quite a bit slower. this is a hot path for ingest pipelines, + // and performance here is important. 
+ if (value == null || value instanceof String || value instanceof Number || value instanceof Boolean) { + // noop + } else if (value instanceof Map m && m.isEmpty() == false) { + ensureNoSelfReferences(m, ancestors, messageHint); + } else if ((value instanceof Iterable i) && (value instanceof Path == false)) { + ensureNoSelfReferences(i, i, ancestors, messageHint); } else if (value instanceof Object[]) { - return Arrays.asList((Object[]) value); - } else { - return null; + // note: the iterable and reference arguments are different + ensureNoSelfReferences(Arrays.asList((Object[]) value), value, ancestors, messageHint); } } + private static void ensureNoSelfReferences(final Map reference, final Set ancestors, final String messageHint) { + addToAncestorsOrThrow(reference, ancestors, messageHint); + for (Map.Entry e : reference.entrySet()) { + ensureNoSelfReferences(e.getKey(), ancestors, messageHint); + ensureNoSelfReferences(e.getValue(), ancestors, messageHint); + } + ancestors.remove(reference); + } + private static void ensureNoSelfReferences( - final Iterable value, - Object originalReference, + final Iterable iterable, + final Object reference, final Set ancestors, - String messageHint + final String messageHint ) { - if (value != null) { - if (ancestors.add(originalReference) == false) { - String suffix = Strings.isNullOrEmpty(messageHint) ? 
"" : String.format(Locale.ROOT, " (%s)", messageHint); - throw new IllegalArgumentException("Iterable object is self-referencing itself" + suffix); - } - for (Object o : value) { - ensureNoSelfReferences(convert(o), o, ancestors, messageHint); + addToAncestorsOrThrow(reference, ancestors, messageHint); + for (Object o : iterable) { + ensureNoSelfReferences(o, ancestors, messageHint); + } + ancestors.remove(reference); + } + + private static void addToAncestorsOrThrow(Object reference, Set ancestors, String messageHint) { + if (ancestors.add(reference) == false) { + StringBuilder sb = new StringBuilder("Iterable object is self-referencing itself"); + if (Strings.hasLength(messageHint)) { + sb.append(" (").append(messageHint).append(")"); } - ancestors.remove(originalReference); + throw new IllegalArgumentException(sb.toString()); } } diff --git a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index 89f55a6981ffb..3b3d205a8ebb5 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -8,10 +8,10 @@ package org.elasticsearch.common.util; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -19,9 +19,10 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; -import static java.util.Collections.emptyMap; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; +import static org.elasticsearch.common.util.CollectionUtils.ensureNoSelfReferences; import static org.elasticsearch.common.util.CollectionUtils.limitSize; import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; @@ -29,7 +30,7 @@ public class CollectionUtilsTests extends ESTestCase { public void testRotateEmpty() { - assertTrue(CollectionUtils.rotate(Collections.emptyList(), randomInt()).isEmpty()); + assertTrue(CollectionUtils.rotate(List.of(), randomInt()).isEmpty()); } public void testRotate() { @@ -77,59 +78,109 @@ public void testUniquify() { } public void testEmptyPartition() { - assertEquals(Collections.emptyList(), eagerPartition(Collections.emptyList(), 1)); + assertEquals(List.of(), eagerPartition(List.of(), 1)); } public void testSimplePartition() { - assertEquals( - Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4), Arrays.asList(5)), - eagerPartition(Arrays.asList(1, 2, 3, 4, 5), 2) - ); + assertEquals(List.of(List.of(1, 2), List.of(3, 4), List.of(5)), eagerPartition(List.of(1, 2, 3, 4, 5), 2)); } public void testSingletonPartition() { - assertEquals( - Arrays.asList(Arrays.asList(1), Arrays.asList(2), Arrays.asList(3), Arrays.asList(4), Arrays.asList(5)), - eagerPartition(Arrays.asList(1, 2, 3, 4, 5), 1) - ); + assertEquals(List.of(List.of(1), List.of(2), List.of(3), List.of(4), List.of(5)), eagerPartition(List.of(1, 2, 3, 4, 5), 1)); } public void testOversizedPartition() { - assertEquals(Arrays.asList(Arrays.asList(1, 2, 3, 4, 5)), eagerPartition(Arrays.asList(1, 2, 3, 4, 5), 15)); + assertEquals(List.of(List.of(1, 2, 3, 4, 5)), eagerPartition(List.of(1, 2, 3, 4, 5), 15)); } public void testPerfectPartition() { assertEquals( - Arrays.asList(Arrays.asList(1, 2, 3, 4, 5, 6), Arrays.asList(7, 8, 9, 10, 11, 12)), - eagerPartition(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 6) + List.of(List.of(1, 2, 3, 4, 5, 6), List.of(7, 8, 9, 10, 11, 12)), + eagerPartition(List.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 6) ); } public void testEnsureNoSelfReferences() { - CollectionUtils.ensureNoSelfReferences(emptyMap(), "test with empty map"); - CollectionUtils.ensureNoSelfReferences(null, "test with null"); + 
ensureNoSelfReferences("string", "test with a string"); + ensureNoSelfReferences(2, "test with a number"); + ensureNoSelfReferences(true, "test with a boolean"); + ensureNoSelfReferences(Map.of(), "test with an empty map"); + ensureNoSelfReferences(Set.of(), "test with an empty set"); + ensureNoSelfReferences(List.of(), "test with an empty list"); + ensureNoSelfReferences(new Object[0], "test with an empty array"); + ensureNoSelfReferences((Iterable) Collections::emptyIterator, "test with an empty iterable"); + } + public void testEnsureNoSelfReferencesMap() { + // map value { Map map = new HashMap<>(); map.put("field", map); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> CollectionUtils.ensureNoSelfReferences(map, "test with self ref value") + () -> ensureNoSelfReferences(map, "test with self ref value") ); assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref value)")); } + // map key { Map map = new HashMap<>(); map.put(map, 1); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> CollectionUtils.ensureNoSelfReferences(map, "test with self ref key") + () -> ensureNoSelfReferences(map, "test with self ref key") ); assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref key)")); } + // nested map value + { + Map map = new HashMap<>(); + map.put("field", Set.of(List.of((Iterable) () -> Iterators.single(new Object[] { map })))); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ensureNoSelfReferences(map, "test with self ref nested value") + ); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref nested value)")); + } + } + + public void testEnsureNoSelfReferencesSet() { + Set set = new HashSet<>(); + set.add("foo"); + set.add(set); + + IllegalArgumentException e = expectThrows( + 
IllegalArgumentException.class, + () -> ensureNoSelfReferences(set, "test with self ref set") + ); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref set)")); + } + + public void testEnsureNoSelfReferencesList() { + List list = new ArrayList<>(); + list.add("foo"); + list.add(list); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ensureNoSelfReferences(list, "test with self ref list") + ); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref list)")); + } + + public void testEnsureNoSelfReferencesArray() { + Object[] array = new Object[2]; + array[0] = "foo"; + array[1] = array; + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ensureNoSelfReferences(array, "test with self ref array") + ); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself (test with self ref array)")); } public void testLimitSizeOfShortList() { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 6623276c84eb6..97ef5a3568309 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.PathUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedObjectNotFoundException; @@ -954,7 +953,7 @@ public void testEnsureNoSelfReferences() throws IOException { /** * Test that the same map written multiple times do not trigger the self-reference check in - * {@link 
CollectionUtils#ensureNoSelfReferences(Object, String)} (Object)} + * {@link XContentBuilder#ensureNoSelfReferences(Object)} */ public void testRepeatedMapsAndNoSelfReferences() throws Exception { Map mapB = singletonMap("b", "B"); From 06bebb78cf38b8e4f8d89dd3eb18f8e2e22803b4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 2 Feb 2023 09:19:12 -0800 Subject: [PATCH 63/63] Revert "Revert "Convert full cluster restart tests to new rest testing framework (#93422)" (#93444)" This reverts commit 221c93531b7f6063913ebc21a4ae64f35df32ff2. --- .../main/groovy/elasticsearch.bwc-test.gradle | 14 +++ .../internal/ElasticsearchTestBasePlugin.java | 2 +- .../InternalTestArtifactExtension.java | 2 +- .../test/rest/RestTestBasePlugin.java | 74 ++++++++++-- qa/full-cluster-restart/build.gradle | 62 ++--------- .../upgrades/FullClusterRestartIT.java | 49 +++++++- .../FullClusterRestartTestOrdering.java | 24 ++++ .../FullClusterRestartUpgradeStatus.java | 14 +++ ...rameterizedFullClusterRestartTestCase.java | 100 +++++++++++++++++ .../upgrades/QueryBuilderBWCIT.java | 29 ++++- .../test/cluster/ClusterHandle.java | 17 +++ .../local/AbstractLocalSpecBuilder.java | 23 ++++ .../local/DefaultLocalClusterSpecBuilder.java | 34 +++++- .../cluster/local/LocalClusterFactory.java | 72 ++++++++++-- .../cluster/local/LocalClusterHandle.java | 59 +++++----- .../test/cluster/local/LocalClusterSpec.java | 14 ++- .../local/LocalClusterSpecBuilder.java | 11 ++ .../local/LocalElasticsearchCluster.java | 24 +++- .../test/cluster/local/LocalSpecBuilder.java | 11 ++ .../cluster/local/WaitForHttpResource.java | 2 +- .../LocalDistributionResolver.java | 3 + .../ReleasedDistributionResolver.java | 54 +++++++++ .../SnapshotDistributionResolver.java | 30 ++++- .../qa/full-cluster-restart/build.gradle | 99 ++--------------- .../xpack/restart/FullClusterRestartIT.java | 54 ++++++++- .../resources/system_key | 0 x-pack/qa/full-cluster-restart/build.gradle | 105 ++---------------- 
...stractXpackFullClusterRestartTestCase.java | 49 ++++++++ .../restart/CoreFullClusterRestartIT.java | 46 ++++++++ .../xpack/restart/FullClusterRestartIT.java | 51 +++++---- ...MLModelDeploymentFullClusterRestartIT.java | 10 +- ...nfigIndexMappingsFullClusterRestartIT.java | 10 +- .../MlHiddenIndicesFullClusterRestartIT.java | 10 +- .../MlMigrationFullClusterRestartIT.java | 10 +- .../xpack/restart/QueryBuilderBWCIT.java | 42 +++++++ .../xpack/restart/WatcherMappingUpdateIT.java | 12 +- .../xpack/restart/funny-timeout-watch.json | 0 .../xpack/restart/logging-watch.json | 0 .../xpack/restart/simple-watch.json | 0 .../xpack/restart/throttle-period-watch.json | 0 .../resources/system_key | 0 .../restart/CoreFullClusterRestartIT.java | 24 ---- .../xpack/restart/QueryBuilderBWCIT.java | 22 ---- 43 files changed, 867 insertions(+), 401 deletions(-) rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/FullClusterRestartIT.java (97%) create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java create mode 100644 qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java rename qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java (91%) create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (65%) rename x-pack/plugin/shutdown/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) create mode 100644 
x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java (97%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java (94%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java (96%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java (96%) create mode 100644 x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java (89%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/logging-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/simple-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json (100%) rename x-pack/qa/full-cluster-restart/src/{test => javaRestTest}/resources/system_key (100%) delete mode 100644 
x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index b80c450c5914e..a5e74c3721297 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,6 +9,8 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask ext.bwcTaskName = { Version version -> return "v${version}#bwcTest" @@ -36,5 +38,17 @@ plugins.withType(ElasticsearchTestBasePlugin) { } } +plugins.withType(InternalJavaRestTestPlugin) { + tasks.named("javaRestTest") { + enabled = false + } + + tasks.withType(StandaloneRestIntegTestTask).configureEach { + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + usesDefaultDistribution() + } +} + tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 854dc6d204382..c6758092b17ec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java 
@@ -89,7 +89,7 @@ public void execute(Task t) { test.getJvmArgumentProviders().add(nonInputProperties); test.getExtensions().add("nonInputProperties", nonInputProperties); - test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName())); + test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); test.exclude("**/*$*.class"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java index fae845b229651..4952085f466be 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java @@ -32,7 +32,7 @@ public void registerTestArtifactFromSourceSet(SourceSet sourceSet) { JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class); javaPluginExtension.registerFeature(name + "Artifacts", featureSpec -> { featureSpec.usingSourceSet(sourceSet); - featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-" + name + "-artifacts", "1.0"); + featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-test-artifacts", "1.0"); // This feature is only used internally in the // elasticsearch build so we do not need any publication. 
featureSpec.disablePublication(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 9baa17bc00d7c..1a7b5bc3ee2a1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -13,6 +13,8 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; +import org.elasticsearch.gradle.ElasticsearchDistributionType; +import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; @@ -58,6 +60,8 @@ public class RestTestBasePlugin implements Plugin { private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java"; private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution"; private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution"; + private static final String BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private static final String BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; private static final String TESTS_CLUSTER_MODULES_PATH_SYSPROP = "tests.cluster.modules.path"; private static final String TESTS_CLUSTER_PLUGINS_PATH_SYSPROP = "tests.cluster.plugins.path"; private static final String DEFAULT_REST_INTEG_TEST_DISTRO = "default_distro"; @@ -79,16 +83,17 @@ public void apply(Project project) { project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); // Register integ-test and default distributions - 
NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); - ElasticsearchDistribution defaultDistro = distributions.create(DEFAULT_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - }); - ElasticsearchDistribution integTestDistro = distributions.create(INTEG_TEST_REST_INTEG_TEST_DISTRO, distro -> { - distro.setVersion(VersionProperties.getElasticsearch()); - distro.setArchitecture(Architecture.current()); - distro.setType(ElasticsearchDistributionTypes.INTEG_TEST_ZIP); - }); + ElasticsearchDistribution defaultDistro = createDistribution( + project, + DEFAULT_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch() + ); + ElasticsearchDistribution integTestDistro = createDistribution( + project, + INTEG_TEST_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch(), + ElasticsearchDistributionTypes.INTEG_TEST_ZIP + ); // Create configures for module and plugin dependencies Configuration modulesConfiguration = createPluginConfiguration(project, MODULES_CONFIGURATION, true, false); @@ -151,6 +156,35 @@ public Void call(Object... args) { return null; } }); + + // Add `usesBwcDistribution(version)` extension method to test tasks to indicate they require a BWC distribution + task.getExtensions().getExtraProperties().set("usesBwcDistribution", new Closure(task) { + @Override + public Void call(Object... 
args) { + if (args.length != 1 && args[0] instanceof Version == false) { + throw new IllegalArgumentException("Expected exactly one argument of type org.elasticsearch.gradle.Version"); + } + + Version version = (Version) args[0]; + boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + String versionString = version.toString(); + ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); + + task.dependsOn(bwcDistro); + registerDistributionInputs(task, bwcDistro); + + nonInputSystemProperties.systemProperty( + (isReleased ? BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX : BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX) + versionString, + providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) + ); + + if (version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + // If we are upgrade testing older versions we also need to upgrade to 7.last + this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + } + return null; + } + }); }); project.getTasks() @@ -158,6 +192,26 @@ public Void call(Object... 
args) { .configure(check -> check.dependsOn(project.getTasks().withType(StandaloneRestIntegTestTask.class))); } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { + return createDistribution(project, name, version, null); + } + + private ElasticsearchDistribution createDistribution(Project project, String name, String version, ElasticsearchDistributionType type) { + NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); + ElasticsearchDistribution maybeDistro = distributions.findByName(name); + if (maybeDistro == null) { + return distributions.create(name, distro -> { + distro.setVersion(version); + distro.setArchitecture(Architecture.current()); + if (type != null) { + distro.setType(type); + } + }); + } else { + return maybeDistro; + } + } + private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Action patternFilter) { return distribution.getExtracted().getAsFileTree().matching(patternFilter); } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index a3af45b43363e..b6f181809e0e4 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -6,64 +6,20 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.internal-test-artifact' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } +testArtifacts { + registerTestArtifactFromSourceSet(sourceSets.javaRestTest) +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#upgradedClusterTest") +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) } } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java similarity index 97% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index af66fbc61562b..3f9a007e6bf4e 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; @@ -28,6 +30,10 @@ 
import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.transport.Compression; @@ -35,6 +41,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; import java.io.IOException; import java.util.ArrayList; @@ -44,7 +54,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -80,13 +89,41 @@ * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. 
*/ -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + private static TemporaryFolder repoDirectory = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .apply(() -> clusterConfig) + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); private String index; + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + @Before public void setIndex() { - index = getTestName().toLowerCase(Locale.ROOT); + index = getRootTestName(); } public void testSearch() throws Exception { @@ -1051,7 +1088,7 @@ public void testSnapshotRestore() throws IOException { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } @@ -1725,7 +1762,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } @@ -1785,7 +1822,7 @@ public void 
testForbidDisableSoftDeletesOnRestore() throws Exception { repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); + repoConfig.field("location", repoDirectory.getRoot().getPath()); } repoConfig.endObject(); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java new file mode 100644 index 0000000000000..232619ee93bb9 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; + +import java.util.Comparator; + +public class FullClusterRestartTestOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + return Integer.compare(getOrdinal(o1), getOrdinal(o2)); + } + + private int getOrdinal(TestMethodAndParams t) { + return ((FullClusterRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java new file mode 100644 index 0000000000000..06048d020e2a0 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +public enum FullClusterRestartUpgradeStatus { + OLD, + UPGRADED +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..a064c87743800 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; + +@TestCaseOrdering(FullClusterRestartTestOrdering.class) +public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static boolean upgradeFailed = false; + private static boolean upgraded = false; + private final FullClusterRestartUpgradeStatus requestedUpgradeStatus; + + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + this.requestedUpgradeStatus = upgradeStatus; + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return Arrays.stream(FullClusterRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + } + + @Before + public void maybeUpgrade() throws Exception { + if (upgraded == false && requestedUpgradeStatus == UPGRADED) { + try { + if 
(OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { + // First upgrade to latest wire compatible version + getUpgradeCluster().upgradeToVersion(MINIMUM_WIRE_COMPATIBLE_VERSION); + } + getUpgradeCluster().upgradeToVersion(Version.CURRENT); + closeClients(); + initClient(); + } catch (Exception e) { + upgradeFailed = true; + throw e; + } finally { + upgraded = true; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + @AfterClass + public static void resetUpgrade() { + upgraded = false; + upgradeFailed = false; + } + + public boolean isRunningAgainstOldCluster() { + return requestedUpgradeStatus == OLD; + } + + public static org.elasticsearch.Version getOldClusterVersion() { + return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + public static Version getOldClusterTestVersion() { + return Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + protected abstract ElasticsearchCluster getUpgradeCluster(); + + @Override + protected String getTestRestCluster() { + return getUpgradeCluster().getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected String getRootTestName() { + return getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java similarity index 91% rename from qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java rename to qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index d69f0b05958f9..1636644409fc7 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java 
@@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -32,7 +34,11 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.ClassRule; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -54,10 +60,29 @@ * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. 
*/ -public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { - +public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("xpack.security.enabled", "false") + .apply(() -> clusterConfig) + .build(); + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public QueryBuilderBWCIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + static { addCandidate(""" "match": { "text_field": "value"} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java index 658925744860d..2a4e3e3958c57 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/ClusterHandle.java @@ -8,6 +8,8 @@ package org.elasticsearch.test.cluster; +import org.elasticsearch.test.cluster.util.Version; + import java.io.Closeable; /** @@ -73,4 +75,19 @@ public interface ClusterHandle extends Closeable { * @return cluster node TCP transport endpoints */ String getTransportEndpoint(int index); + + /** + * Upgrades a single node to the given version. Method blocks until the node is back up and ready to respond to requests. + * + * @param index index of node ot upgrade + * @param version version to upgrade to + */ + void upgradeNodeToVersion(int index, Version version); + + /** + * Performs a "full cluster restart" upgrade to the given version. Method blocks until the cluster is restarted and available. 
+ * + * @param version version to upgrade to + */ + void upgradeToVersion(Version version); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 2e3366cdf9af3..aa71ffdf27a72 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.ArrayList; @@ -32,9 +33,11 @@ public abstract class AbstractLocalSpecBuilder> im private final Set plugins = new HashSet<>(); private final Set features = new HashSet<>(); private final Map keystoreSettings = new HashMap<>(); + private final Map keystoreFiles = new HashMap<>(); private final Map extraConfigFiles = new HashMap<>(); private final Map systemProperties = new HashMap<>(); private DistributionType distributionType; + private Version version; private String keystorePassword; protected AbstractLocalSpecBuilder(AbstractLocalSpecBuilder parent) { @@ -138,6 +141,16 @@ public Map getKeystoreSettings() { return inherit(() -> parent.getKeystoreSettings(), keystoreSettings); } + @Override + public T keystore(String key, Resource file) { + this.keystoreFiles.put(key, file); + return cast(this); + } + + public Map getKeystoreFiles() { + return inherit(() -> parent.getKeystoreFiles(), keystoreFiles); + } + @Override public T configFile(String fileName, Resource configFile) { this.extraConfigFiles.put(fileName, configFile); @@ -168,6 +181,16 @@ public String getKeystorePassword() { return 
inherit(() -> parent.getKeystorePassword(), keystorePassword); } + @Override + public T version(Version version) { + this.version = version; + return cast(this); + } + + public Version getVersion() { + return inherit(() -> parent.getVersion(), version); + } + private List inherit(Supplier> parent, List child) { List combinedList = new ArrayList<>(); if (this.parent != null) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 9c4aa48eb03d4..fad762fa34c23 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -19,12 +19,14 @@ import java.util.List; import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Supplier; public class DefaultLocalClusterSpecBuilder extends AbstractLocalSpecBuilder implements LocalClusterSpecBuilder { private String name = "test-cluster"; private final List nodeBuilders = new ArrayList<>(); private final List users = new ArrayList<>(); private final List roleFiles = new ArrayList<>(); + private final List> lazyConfigProviders = new ArrayList<>(); public DefaultLocalClusterSpecBuilder() { super(null); @@ -46,6 +48,12 @@ public DefaultLocalClusterSpecBuilder apply(LocalClusterConfigProvider configPro return this; } + @Override + public LocalClusterSpecBuilder apply(Supplier configProvider) { + lazyConfigProviders.add(configProvider); + return this; + } + @Override public DefaultLocalClusterSpecBuilder nodes(int nodes) { if (nodes < nodeBuilders.size()) { @@ -117,7 +125,28 @@ public ElasticsearchCluster build() { clusterSpec.setNodes(nodeSpecs); clusterSpec.validate(); - return new LocalElasticsearchCluster(clusterSpec); + return new 
LocalElasticsearchCluster(this); + } + + LocalClusterSpec buildClusterSpec() { + // Apply lazily provided configuration + lazyConfigProviders.forEach(s -> s.get().apply(this)); + + List clusterUsers = users.isEmpty() ? List.of(User.DEFAULT_USER) : users; + LocalClusterSpec clusterSpec = new LocalClusterSpec(name, clusterUsers, roleFiles); + List nodeSpecs; + + if (nodeBuilders.isEmpty()) { + // No node-specific configuration so assume a single-node cluster + nodeSpecs = List.of(new DefaultLocalNodeSpecBuilder(this).build(clusterSpec)); + } else { + nodeSpecs = nodeBuilders.stream().map(node -> node.build(clusterSpec)).toList(); + } + + clusterSpec.setNodes(nodeSpecs); + clusterSpec.validate(); + + return clusterSpec; } public static class DefaultLocalNodeSpecBuilder extends AbstractLocalSpecBuilder implements LocalNodeSpecBuilder { @@ -138,7 +167,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { return new LocalNodeSpec( cluster, name, - Version.CURRENT, + Optional.ofNullable(getVersion()).orElse(Version.CURRENT), getSettingsProviders(), getSettings(), getEnvironmentProviders(), @@ -148,6 +177,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { Optional.ofNullable(getDistributionType()).orElse(DistributionType.INTEG_TEST), getFeatures(), getKeystoreSettings(), + getKeystoreFiles(), getKeystorePassword(), getExtraConfigFiles(), getSystemProperties() diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java index 08318b5145038..5f43bb8aa71b6 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterFactory.java @@ -79,21 +79,20 @@ public LocalClusterHandle create(LocalClusterSpec spec) { public class Node { private final LocalNodeSpec spec; private final Path 
workingDir; - private final Path distributionDir; private final Path repoDir; private final Path dataDir; private final Path logsDir; private final Path configDir; private final Path tempDir; - private boolean initialized = false; + private Path distributionDir; + private Version currentVersion; private Process process = null; private DistributionDescriptor distributionDescriptor; public Node(LocalNodeSpec spec) { this.spec = spec; this.workingDir = baseWorkingDir.resolve(spec.getCluster().getName()).resolve(spec.getName()); - this.distributionDir = workingDir.resolve("distro"); // location of es distribution files, typically hard-linked this.repoDir = baseWorkingDir.resolve("repo"); this.dataDir = workingDir.resolve("data"); this.logsDir = workingDir.resolve("logs"); @@ -101,22 +100,30 @@ public Node(LocalNodeSpec spec) { this.tempDir = workingDir.resolve("tmp"); // elasticsearch temporary directory } - public synchronized void start() { + public synchronized void start(Version version) { LOGGER.info("Starting Elasticsearch node '{}'", spec.getName()); + if (version != null) { + spec.setVersion(version); + } - if (initialized == false) { + if (currentVersion == null || currentVersion.equals(spec.getVersion()) == false) { LOGGER.info("Creating installation for node '{}' in {}", spec.getName(), workingDir); distributionDescriptor = resolveDistribution(); LOGGER.info("Distribution for node '{}': {}", spec.getName(), distributionDescriptor); - initializeWorkingDirectory(); + distributionDir = OS.conditional( + // Use per-version distribution directories on Windows to avoid cleanup failures + c -> c.onWindows(() -> workingDir.resolve("distro").resolve(distributionDescriptor.getVersion().toString())) + .onUnix(() -> workingDir.resolve("distro")) + ); + initializeWorkingDirectory(currentVersion != null); createConfigDirectory(); copyExtraConfigFiles(); // extra config files might be needed for running cli tools like plugin install copyExtraJarFiles(); installPlugins(); - 
if (spec.getDistributionType() == DistributionType.INTEG_TEST) { + if (distributionDescriptor.getType() == DistributionType.INTEG_TEST) { installModules(); } - initialized = true; + currentVersion = spec.getVersion(); } else { createConfigDirectory(); copyExtraConfigFiles(); @@ -125,6 +132,7 @@ public synchronized void start() { writeConfiguration(); createKeystore(); addKeystoreSettings(); + addKeystoreFiles(); configureSecurity(); startElasticsearch(); @@ -135,6 +143,7 @@ public synchronized void stop(boolean forcibly) { ProcessUtils.stopHandle(process.toHandle(), forcibly); ProcessReaper.instance().unregister(getServiceName()); } + deletePortsFiles(); } public void waitForExit() { @@ -159,6 +168,20 @@ public String getTransportEndpoint() { return readPortsFile(portsFile).get(0); } + public void deletePortsFiles() { + try { + Path hostsFile = workingDir.resolve("config").resolve("unicast_hosts.txt"); + Path httpPortsFile = workingDir.resolve("logs").resolve("http.ports"); + Path transportPortsFile = workingDir.resolve("logs").resolve("transport.ports"); + + Files.deleteIfExists(hostsFile); + Files.deleteIfExists(httpPortsFile); + Files.deleteIfExists(transportPortsFile); + } catch (IOException e) { + throw new UncheckedIOException("Failed to write unicast_hosts for: " + this, e); + } + } + public LocalNodeSpec getSpec() { return spec; } @@ -205,9 +228,13 @@ private List readPortsFile(Path file) { } } - private void initializeWorkingDirectory() { + private void initializeWorkingDirectory(boolean preserveWorkingDirectory) { try { - IOUtils.deleteWithRetry(workingDir); + if (preserveWorkingDirectory) { + IOUtils.deleteWithRetry(distributionDir); + } else { + IOUtils.deleteWithRetry(workingDir); + } try { IOUtils.syncWithLinks(distributionDescriptor.getDistributionDir(), distributionDir); } catch (IOUtils.LinkCreationException e) { @@ -350,6 +377,31 @@ private void addKeystoreSettings() { }); } + private void addKeystoreFiles() { + 
spec.getKeystoreFiles().forEach((key, file) -> { + try { + Path path = Files.createTempFile(tempDir, key, null); + file.writeTo(path); + + ProcessUtils.exec( + spec.getKeystorePassword(), + workingDir, + OS.conditional( + c -> c.onWindows(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore.bat")) + .onUnix(() -> distributionDir.resolve("bin").resolve("elasticsearch-keystore")) + ), + getEnvironmentVariables(), + false, + "add-file", + key, + path.toString() + ).waitFor(); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } + }); + } + private void configureSecurity() { if (spec.isSecurityEnabled()) { if (spec.getUsers().isEmpty() == false) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index 878b017e3cd62..6ad2709957299 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -14,7 +14,7 @@ import org.elasticsearch.test.cluster.local.LocalClusterFactory.Node; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.ExceptionUtils; -import org.elasticsearch.test.cluster.util.Retry; +import org.elasticsearch.test.cluster.util.Version; import java.io.IOException; import java.io.UncheckedIOException; @@ -28,7 +28,6 @@ import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinWorkerThread; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -66,7 +65,7 @@ public LocalClusterHandle(String name, List nodes) { public void start() { if (started.getAndSet(true) == false) { 
LOGGER.info("Starting Elasticsearch test cluster '{}'", name); - execute(() -> nodes.parallelStream().forEach(Node::start)); + execute(() -> nodes.parallelStream().forEach(n -> n.start(null))); } waitUntilReady(); } @@ -75,11 +74,10 @@ public void start() { public void stop(boolean forcibly) { if (started.getAndSet(false)) { LOGGER.info("Stopping Elasticsearch test cluster '{}', forcibly: {}", name, forcibly); - execute(() -> nodes.forEach(n -> n.stop(forcibly))); - deletePortFiles(); + execute(() -> nodes.parallelStream().forEach(n -> n.stop(forcibly))); } else { // Make sure the process is stopped, otherwise wait - execute(() -> nodes.forEach(n -> n.waitForExit())); + execute(() -> nodes.parallelStream().forEach(Node::waitForExit)); } } @@ -128,16 +126,31 @@ public String getTransportEndpoint(int index) { return getTransportEndpoints().split(",")[index]; } + @Override + public void upgradeNodeToVersion(int index, Version version) { + Node node = nodes.get(index); + node.stop(false); + LOGGER.info("Upgrading node '{}' to version {}", node.getSpec().getName(), version); + node.start(version); + waitUntilReady(); + } + + @Override + public void upgradeToVersion(Version version) { + stop(false); + if (started.getAndSet(true) == false) { + LOGGER.info("Upgrading Elasticsearch test cluster '{}' to version {}", name, version); + execute(() -> nodes.parallelStream().forEach(n -> n.start(version))); + } + waitUntilReady(); + } + private void waitUntilReady() { writeUnicastHostsFile(); try { - Retry.retryUntilTrue(CLUSTER_UP_TIMEOUT, Duration.ZERO, () -> { - WaitForHttpResource wait = configureWaitForReady(); - return wait.wait(500); - }); - } catch (TimeoutException e) { - throw new RuntimeException("Timed out after " + CLUSTER_UP_TIMEOUT + " waiting for cluster '" + name + "' status to be yellow"); - } catch (ExecutionException e) { + WaitForHttpResource wait = configureWaitForReady(); + wait.waitFor(CLUSTER_UP_TIMEOUT.toMillis()); + } catch (Exception e) { throw new 
RuntimeException("An error occurred while checking cluster '" + name + "' status.", e); } } @@ -191,7 +204,7 @@ private boolean isSecurityAutoConfigured(Node node) { private void writeUnicastHostsFile() { String transportUris = execute(() -> nodes.parallelStream().map(Node::getTransportEndpoint).collect(Collectors.joining("\n"))); - nodes.forEach(node -> { + execute(() -> nodes.parallelStream().forEach(node -> { try { Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); if (Files.notExists(hostsFile)) { @@ -200,23 +213,7 @@ private void writeUnicastHostsFile() { } catch (IOException e) { throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); } - }); - } - - private void deletePortFiles() { - nodes.forEach(node -> { - try { - Path hostsFile = node.getWorkingDir().resolve("config").resolve("unicast_hosts.txt"); - Path httpPortsFile = node.getWorkingDir().resolve("logs").resolve("http.ports"); - Path tranportPortsFile = node.getWorkingDir().resolve("logs").resolve("transport.ports"); - - Files.deleteIfExists(hostsFile); - Files.deleteIfExists(httpPortsFile); - Files.deleteIfExists(tranportPortsFile); - } catch (IOException e) { - throw new UncheckedIOException("Failed to write unicast_hosts for: " + node, e); - } - }); + })); } private T execute(Callable task) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 2836411bbb067..2234b037381a8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -69,7 +69,6 @@ void validate() { public static class LocalNodeSpec { private final LocalClusterSpec cluster; private final String name; - private final Version version; private final List settingsProviders; private final 
Map settings; private final List environmentProviders; @@ -79,9 +78,11 @@ public static class LocalNodeSpec { private final DistributionType distributionType; private final Set features; private final Map keystoreSettings; + private final Map keystoreFiles; private final String keystorePassword; private final Map extraConfigFiles; private final Map systemProperties; + private Version version; public LocalNodeSpec( LocalClusterSpec cluster, @@ -96,6 +97,7 @@ public LocalNodeSpec( DistributionType distributionType, Set features, Map keystoreSettings, + Map keystoreFiles, String keystorePassword, Map extraConfigFiles, Map systemProperties @@ -112,11 +114,16 @@ public LocalNodeSpec( this.distributionType = distributionType; this.features = features; this.keystoreSettings = keystoreSettings; + this.keystoreFiles = keystoreFiles; this.keystorePassword = keystorePassword; this.extraConfigFiles = extraConfigFiles; this.systemProperties = systemProperties; } + void setVersion(Version version) { + this.version = version; + } + public LocalClusterSpec getCluster() { return cluster; } @@ -157,6 +164,10 @@ public Map getKeystoreSettings() { return keystoreSettings; } + public Map getKeystoreFiles() { + return keystoreFiles; + } + public String getKeystorePassword() { return keystorePassword; } @@ -254,6 +265,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider) { n.distributionType, n.features, n.keystoreSettings, + n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, n.systemProperties diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java index c07a491d2ace6..1f4086fd47fe8 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpecBuilder.java @@ 
-12,6 +12,7 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Consumer; +import java.util.function.Supplier; public interface LocalClusterSpecBuilder extends LocalSpecBuilder { /** @@ -19,8 +20,18 @@ public interface LocalClusterSpecBuilder extends LocalSpecBuilder configProvider); + /** * Sets the number of nodes for the cluster. */ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java index 54d541cd07144..9a5e5666f5e9a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalElasticsearchCluster.java @@ -10,18 +10,21 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.cluster.util.Version; import org.junit.runner.Description; import org.junit.runners.model.Statement; import java.nio.file.Path; public class LocalElasticsearchCluster implements ElasticsearchCluster { - private final LocalClusterSpec spec; + private final DefaultLocalClusterSpecBuilder builder; + private LocalClusterSpec spec; private LocalClusterHandle handle; - public LocalElasticsearchCluster(LocalClusterSpec spec) { - this.spec = spec; + public LocalElasticsearchCluster(DefaultLocalClusterSpecBuilder builder) { + this.builder = builder; } @Override @@ -30,9 +33,10 @@ public Statement apply(Statement base, Description description) { @Override public void evaluate() throws Throwable { try { + spec = builder.buildClusterSpec(); handle = new LocalClusterFactory( 
Path.of(System.getProperty("java.io.tmpdir")).resolve(description.getDisplayName()).toAbsolutePath(), - new LocalDistributionResolver(new SnapshotDistributionResolver()) + new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) ).create(spec); handle.start(); base.evaluate(); @@ -97,6 +101,18 @@ public String getTransportEndpoint(int index) { return handle.getTransportEndpoint(index); } + @Override + public void upgradeNodeToVersion(int index, Version version) { + checkHandle(); + handle.upgradeNodeToVersion(index, version); + } + + @Override + public void upgradeToVersion(Version version) { + checkHandle(); + handle.upgradeToVersion(version); + } + private void checkHandle() { if (handle == null) { throw new IllegalStateException("Cluster handle has not been initialized. Did you forget the @ClassRule annotation?"); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index 3b9428bc1a1aa..d01d7d303748f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; import java.util.function.Supplier; @@ -68,6 +69,11 @@ interface LocalSpecBuilder> { */ T keystore(String key, String value); + /** + * Adds a secure file to the node keystore. + */ + T keystore(String key, Resource file); + /** * Sets the security setting keystore password. 
*/ @@ -78,6 +84,11 @@ interface LocalSpecBuilder> { */ T configFile(String fileName, Resource configFile); + /** + * Sets the version of Elasticsearch. Defaults to {@link Version#CURRENT}. + */ + T version(Version version); + /** * Adds a system property to node JVM arguments. */ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java index edab2cdf1e7e9..f00e6f13cb314 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java @@ -90,7 +90,7 @@ public void setPassword(String password) { this.password = password; } - public boolean wait(int durationInMs) throws GeneralSecurityException, InterruptedException, IOException { + public boolean waitFor(long durationInMs) throws GeneralSecurityException, InterruptedException, IOException { final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); final long sleep = Long.max(durationInMs / 10, 100); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java index 5c9f45cbe092f..b9442b28e1591 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/LocalDistributionResolver.java @@ -13,6 +13,9 @@ import java.nio.file.Files; import java.nio.file.Path; +/** + * A {@link DistributionResolver} for resolving locally built distributions for the current version of Elasticsearch. 
+ */ public class LocalDistributionResolver implements DistributionResolver { private final DistributionResolver delegate; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java new file mode 100644 index 0000000000000..12654be310ef8 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/ReleasedDistributionResolver.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.cluster.local.distribution; + +import org.elasticsearch.test.cluster.util.Version; + +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving previously released distributions of Elasticsearch. + */ +public class ReleasedDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; + + @Override + public DistributionDescriptor resolve(Version version, DistributionType type) { + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath == null) { + String taskPath = System.getProperty("tests.task"); + String project = taskPath.substring(0, taskPath.lastIndexOf(':')); + String taskName = taskPath.substring(taskPath.lastIndexOf(':') + 1); + + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. 
Ensure you've added the following to the build script for project '" + + project + + "':\n\n" + + "tasks.named('" + + taskName + + "') {\n" + + " usesBwcDistribution(" + + version + + ")\n" + + "}" + ); + } + + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." + ); + } + + return new DefaultDistributionDescriptor(version, false, distributionDir, DistributionType.DEFAULT); + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java index 182dbe66a584d..c6cecf09e9b9d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/distribution/SnapshotDistributionResolver.java @@ -10,10 +10,36 @@ import org.elasticsearch.test.cluster.util.Version; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * A {@link DistributionResolver} for resolving snapshot versions of Elasticsearch for previous, backwards-compatible versions. 
+ */ public class SnapshotDistributionResolver implements DistributionResolver { + private static final String BWC_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private final DistributionResolver delegate; + + public SnapshotDistributionResolver(DistributionResolver delegate) { + this.delegate = delegate; + } + @Override public DistributionDescriptor resolve(Version version, DistributionType type) { - // Not yet implemented - throw new UnsupportedOperationException("Cannot resolve distribution for version " + version); + String distributionPath = System.getProperty(BWC_DISTRIBUTION_SYSPROP_PREFIX + version.toString()); + + if (distributionPath != null) { + Path distributionDir = Path.of(distributionPath); + if (Files.notExists(distributionDir)) { + throw new IllegalStateException( + "Cannot locate Elasticsearch distribution. Directory at '" + distributionDir + "' does not exist." + ); + } + + // Snapshot distributions are never release builds and always use the default distribution + return new DefaultDistributionDescriptor(version, true, distributionDir, DistributionType.DEFAULT); + } + + return delegate.resolve(version, type); } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle index 429b29bbc9fdb..d9539bb668b4b 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle @@ -1,104 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', 
file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 65% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index df6e3ed6b9388..07ed594770649 100644 --- a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,10 +15,17 @@ import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.BeforeClass; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -33,7 +42,37 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", 
Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } @Override protected Settings restClientSettings() { @@ -47,10 +86,13 @@ protected Settings restClientSettings() { .build(); } - @SuppressWarnings("unchecked") - public void testNodeShutdown() throws Exception { + @BeforeClass + public static void checkClusterVersion() { assumeTrue("no shutdown in versions before " + Version.V_7_15_0, getOldClusterVersion().onOrAfter(Version.V_7_15_0)); + } + @SuppressWarnings("unchecked") + public void testNodeShutdown() throws Exception { if (isRunningAgainstOldCluster()) { final Request getNodesReq = new Request("GET", "_nodes"); final Response getNodesResp = adminClient().performRequest(getNodesReq); @@ -64,7 +106,7 @@ public void testNodeShutdown() throws Exception { // Use the types available from as early as possible final String type = randomFrom("restart", "remove"); putBody.field("type", type); - putBody.field("reason", this.getTestName()); + putBody.field("reason", getRootTestName()); } putBody.endObject(); putShutdownRequest.setJsonEntity(Strings.toString(putBody)); @@ -81,7 +123,7 @@ public void testNodeShutdown() throws Exception { assertThat("there should be exactly one shutdown registered", shutdowns, hasSize(1)); final Map shutdown = shutdowns.get(0); assertThat(shutdown.get("node_id"), notNullValue()); // Since we randomly determine the node ID, we can't check it - 
assertThat(shutdown.get("reason"), equalTo(this.getTestName())); + assertThat(shutdown.get("reason"), equalTo(getRootTestName())); assertThat( (String) shutdown.get("status"), anyOf( diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/plugin/shutdown/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/plugin/shutdown/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 3923d439d394d..d9539bb668b4b 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -1,110 +1,21 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' dependencies { // TODO: Remove core dependency and change tests to not use builders that are part of xpack-core. 
// Currently needed for MlConfigIndexMappingsFullClusterRestartIT and SLM classes used in // FullClusterRestartIT - testImplementation(testArtifact(project(xpackModule('core')))) - testImplementation(testArtifact(project(":qa:full-cluster-restart"))) - testImplementation project(':x-pack:qa') -} - -tasks.named("forbiddenPatterns") { - exclude '**/system_key' -} - -String outputDir = "${buildDir}/generated-resources/${project.name}" - -tasks.register("copyTestNodeKeyMaterial", Copy) { - from project(':x-pack:plugin:core') - .files( - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt' - ) - into outputDir + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) + javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"))) + javaRestTestImplementation project(':x-pack:qa') } BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - testDistribution = "DEFAULT" - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - user username: "test_user", password: "x-pack-test-password" - - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - - extraConfigFile 'testnode.pem', 
file("${outputDir}/testnode.pem") - extraConfigFile 'testnode.crt', file("${outputDir}/testnode.crt") - - keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - - setting 'xpack.security.authc.api_key.enabled', 'true' - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "copyTestNodeKeyMaterial" - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - systemProperty 'tests.is_old_cluster', 'true' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - mustRunAfter("precommit") - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. 
- baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith("${baseName}#") && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#upgradedClusterTest" - } - + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..0bc9101301a54 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; +import org.elasticsearch.upgrades.ParameterizedFullClusterRestartTestCase; +import org.junit.ClassRule; + +public abstract class AbstractXpackFullClusterRestartTestCase extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } +} diff 
--git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java new file mode 100644 index 0000000000000..dcdc127079637 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.restart; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClusterRestartIT; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class CoreFullClusterRestartIT extends FullClusterRestartIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + 
.keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public CoreFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 42c551b16655b..ab48825ed983a 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; @@ -25,7 +27,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; 
import org.elasticsearch.xcontent.XContentParser; @@ -61,11 +63,15 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class FullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -103,12 +109,7 @@ public void testSecurityNativeRealm() throws Exception { } else { waitForYellow(".security"); final Request getSettingsRequest = new Request("GET", "/.security/_settings/index.format"); - getSettingsRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ) - ); + getSettingsRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); Response settingsResponse = client().performRequest(getSettingsRequest); Map settingsResponseMap = entityAsMap(settingsResponse); logger.info("settings response map {}", settingsResponseMap); @@ -390,12 +391,7 @@ public void testApiKeySuperuser() throws IOException { "doc_type": "foo" }"""); if (getOldClusterVersion().onOrAfter(Version.V_7_10_0)) { - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", 
apiKeyAuthHeader)); } else { indexRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", apiKeyAuthHeader)); } @@ -409,12 +405,7 @@ public void testApiKeySuperuser() throws IOException { // read is ok final Request searchRequest = new Request("GET", ".security/_search"); - searchRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + searchRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); assertOK(client().performRequest(searchRequest)); // write must not be allowed @@ -423,12 +414,7 @@ public void testApiKeySuperuser() throws IOException { { "doc_type": "foo" }"""); - indexRequest.setOptions( - expectWarnings( - "this request accesses system indices: [.security-7], but in a future major " - + "version, direct access to system indices will be prevented by default" - ).toBuilder().addHeader("Authorization", apiKeyAuthHeader) - ); + indexRequest.setOptions(systemIndexWarningHandlerOptions(".security-7").addHeader("Authorization", apiKeyAuthHeader)); final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(indexRequest)); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); assertThat(e.getMessage(), containsString("is unauthorized")); @@ -995,4 +981,17 @@ private static void createComposableTemplate(RestClient client, String templateN createIndexTemplateRequest.setEntity(templateJSON); client.performRequest(createIndexTemplateRequest); } + + private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { + return RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler( + w -> w.size() > 0 + && w.contains( + "this request accesses system indices: [" + + index + + "], but in a future major " + + 
"version, direct access to system indices will be prevented by default" + ) == false + ); + } } diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java similarity index 97% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index 4d1a5dfb75ab7..25a14c47e52c7 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -15,7 +17,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.junit.Before; @@ -32,7 +34,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created static final String 
BASE_64_ENCODED_MODEL = @@ -63,6 +65,10 @@ public class MLModelDeploymentFullClusterRestartIT extends AbstractFullClusterRe RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; } + public MLModelDeploymentFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Before public void setLogging() throws IOException { Request loggingSettings = new Request("PUT", "_cluster/settings"); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java similarity index 94% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index bfc078ffe9206..e4ab3957f2627 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -13,7 +15,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import 
org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; @@ -29,11 +31,15 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlConfigIndexMappingsFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_JOB_ID = "ml-config-mappings-old-cluster-job"; private static final String NEW_CLUSTER_JOB_ID = "ml-config-mappings-new-cluster-job"; + public MlConfigIndexMappingsFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index 274fa7ee40fce..aeb3dad547946 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import 
org.elasticsearch.client.RequestOptions; @@ -16,7 +18,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.spi.XContentProvider; @@ -38,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlHiddenIndicesFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String JOB_ID = "ml-hidden-indices-old-cluster-job"; private static final List, String>> EXPECTED_INDEX_ALIAS_PAIRS = List.of( @@ -49,6 +51,10 @@ public class MlHiddenIndicesFullClusterRestartIT extends AbstractFullClusterRest Tuple.tuple(List.of(".ml-anomalies-shared"), ".ml-anomalies-.write-" + JOB_ID) ); + public MlHiddenIndicesFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java similarity index 96% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java rename to 
x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 61ce6f7827e2a..2bbda9123ae34 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -17,7 +19,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -35,13 +37,17 @@ import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.is; -public class MlMigrationFullClusterRestartIT extends AbstractFullClusterRestartTestCase { +public class MlMigrationFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { private static final String OLD_CLUSTER_OPEN_JOB_ID = "migration-old-cluster-open-job"; private static final String OLD_CLUSTER_STARTED_DATAFEED_ID = "migration-old-cluster-started-datafeed"; private static final String OLD_CLUSTER_CLOSED_JOB_ID = "migration-old-cluster-closed-job"; private static final String OLD_CLUSTER_STOPPED_DATAFEED_ID = "migration-old-cluster-stopped-datafeed"; + public MlMigrationFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus 
upgradeStatus) { + super(upgradeStatus); + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java new file mode 100644 index 0000000000000..563cde322b725 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { + + static { + clusterConfig = c -> c.setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.key", "testnode.pem") + .setting("xpack.security.transport.ssl.certificate", "testnode.crt") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.watcher.encrypt_sensitive_data", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .configFile("testnode.pem", Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .configFile("testnode.crt", 
Resource.fromClasspath("org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key")) + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode"); + } + + public QueryBuilderBWCIT(FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java similarity index 89% rename from x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java rename to x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java index ea926e964360d..043b3f49a8825 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.restart; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -14,7 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; +import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import 
java.nio.charset.StandardCharsets; import java.util.Base64; @@ -23,7 +25,11 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; -public class WatcherMappingUpdateIT extends AbstractFullClusterRestartTestCase { +public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { + + public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } @Override protected Settings restClientSettings() { @@ -91,7 +97,7 @@ private void assertNoMappingVersion(String index) throws Exception { private RequestOptions.Builder getWarningHandlerOptions(String index) { return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(w -> w.contains(getWatcherSystemIndexWarning(index)) == false || w.size() != 1); + .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false); } private String getWatcherSystemIndexWarning(String index) { diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/logging-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/logging-watch.json diff --git 
a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/simple-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key similarity index 100% rename from x-pack/qa/full-cluster-restart/src/test/resources/system_key rename to x-pack/qa/full-cluster-restart/src/javaRestTest/resources/system_key diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java deleted file mode 100644 index e06cb12f747a7..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.upgrades.FullClusterRestartIT; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class CoreFullClusterRestartIT extends FullClusterRestartIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java deleted file mode 100644 index cffc6881df645..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/QueryBuilderBWCIT.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class QueryBuilderBWCIT extends org.elasticsearch.upgrades.QueryBuilderBWCIT { - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } -}