diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a5dd25d2910f2..28c0d861158b8 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -445,14 +445,6 @@ updates: labels: - "dependabot" - "dependencies" - - directory: /distribution/tools/upgrade-cli/ - open-pull-requests-limit: 1 - package-ecosystem: gradle - schedule: - interval: weekly - labels: - - "dependabot" - - "dependencies" - directory: /doc-tools/ open-pull-requests-limit: 1 package-ecosystem: gradle diff --git a/.github/workflows/trigger-manifest-generation.yml b/.github/workflows/trigger-manifest-generation.yml new file mode 100644 index 0000000000000..ddee9c3710d3b --- /dev/null +++ b/.github/workflows/trigger-manifest-generation.yml @@ -0,0 +1,17 @@ +name: Trigger manifest generation workflow + +on: + workflow_dispatch: + push: + paths: + - buildSrc/version.properties + +jobs: + trigger-manifest-workflow: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ubuntu-latest + steps: + - name: Trigger manifest-update workflow + run: | + echo "Triggering manifest-update workflow at https://build.ci.opensearch.org/job/manifest-update/" + curl -f -X POST https://build.ci.opensearch.org/job/manifest-update/build --user ${{ secrets.JENKINS_GITHUB_USER}}:${{ secrets.JENKINS_GITHUB_USER_TOKEN}} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 85b50967e9fb7..1a7fd77591249 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,25 +6,55 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.x] ### Added - Add support for Warm Indices Write Block on Flood Watermark breach ([#18375](https://github.com/opensearch-project/OpenSearch/pull/18375)) +- Add support for custom index name resolver from cluster plugin ([#18593](https://github.com/opensearch-project/OpenSearch/pull/18593)) +- Rename WorkloadGroupTestUtil to WorkloadManagementTestUtil ([#18709](https://github.com/opensearch-project/OpenSearch/pull/18709)) +- Disallow resize for Warm Index, add Parameterized ITs for close in remote store ([#18686](https://github.com/opensearch-project/OpenSearch/pull/18686)) - Ability to run Code Coverage with Gradle and produce the jacoco reports locally ([#18509](https://github.com/opensearch-project/OpenSearch/issues/18509)) +- [Workload Management] Update logging and Javadoc, rename QueryGroup to WorkloadGroup ([#18711](https://github.com/opensearch-project/OpenSearch/issues/18711)) - Add NodeResourceUsageStats to ClusterInfo ([#18480](https://github.com/opensearch-project/OpenSearch/issues/18472)) - Introduce SecureHttpTransportParameters experimental API (to complement SecureTransportParameters counterpart) ([#18572](https://github.com/opensearch-project/OpenSearch/issues/18572)) - Create equivalents of JSM's AccessController in the java agent ([#18346](https://github.com/opensearch-project/OpenSearch/issues/18346)) +- [WLM] Add WLM mode validation for workload group CRUD requests ([#18652](https://github.com/opensearch-project/OpenSearch/issues/18652)) - Introduced a new cluster-level API to fetch remote store metadata (segments and translogs) for each shard of an index. ([#18257](https://github.com/opensearch-project/OpenSearch/pull/18257)) - Add last index request timestamp columns to the `_cat/indices` API. 
([10766](https://github.com/opensearch-project/OpenSearch/issues/10766)) - Introduce a new pull-based ingestion plugin for file-based indexing (for local testing) ([#18591](https://github.com/opensearch-project/OpenSearch/pull/18591)) - Add support for search pipeline in search and msearch template ([#18564](https://github.com/opensearch-project/OpenSearch/pull/18564)) +- [Workload Management] Modify logging message in WorkloadGroupService ([#18712](https://github.com/opensearch-project/OpenSearch/pull/18712)) - Add BooleanQuery rewrite moving constant-scoring must clauses to filter clauses ([#18510](https://github.com/opensearch-project/OpenSearch/issues/18510)) +- Add functionality for plugins to inject QueryCollectorContext during QueryPhase ([#18637](https://github.com/opensearch-project/OpenSearch/pull/18637)) +- Add support for non-timing info in profiler ([#18460](https://github.com/opensearch-project/OpenSearch/issues/18460)) +- Extend Approximation Framework to other numeric types ([#18530](https://github.com/opensearch-project/OpenSearch/issues/18530)) +- Add Semantic Version field type mapper and extensive unit tests([#18454](https://github.com/opensearch-project/OpenSearch/pull/18454)) +- Pass index settings to system ingest processor factories. ([#18708](https://github.com/opensearch-project/OpenSearch/pull/18708)) +- Include named queries from rescore contexts in matched_queries array ([#18697](https://github.com/opensearch-project/OpenSearch/pull/18697)) +- Add the configurable limit on rule cardinality ([#18663](https://github.com/opensearch-project/OpenSearch/pull/18663)) +- [Experimental] Start in "clusterless" mode if a clusterless ClusterPlugin is loaded ([#18479](https://github.com/opensearch-project/OpenSearch/pull/18479)) ### Changed - Update Subject interface to use CheckedRunnable ([#18570](https://github.com/opensearch-project/OpenSearch/issues/18570)) +- Update SecureAuxTransportSettingsProvider to distinguish between aux transport types ([#18616](https://github.com/opensearch-project/OpenSearch/pull/18616)) +- Make node duress values cacheable ([#18649](https://github.com/opensearch-project/OpenSearch/pull/18649)) +- Making multi rate limiters in repository dynamic [#18069](https://github.com/opensearch-project/OpenSearch/pull/18069) ### Dependencies - Bump `stefanzweifel/git-auto-commit-action` from 5 to 6 ([#18524](https://github.com/opensearch-project/OpenSearch/pull/18524)) - Bump Apache Lucene to 10.2.2 ([#18573](https://github.com/opensearch-project/OpenSearch/pull/18573)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.24.3 to 2.25.0 ([#18589](https://github.com/opensearch-project/OpenSearch/pull/18589)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.24.3 to 2.25.1 ([#18589](https://github.com/opensearch-project/OpenSearch/pull/18589), [#18744](https://github.com/opensearch-project/OpenSearch/pull/18744)) - Bump `com.google.code.gson:gson` from 2.13.0 to 2.13.1 ([#18585](https://github.com/opensearch-project/OpenSearch/pull/18585)) - Bump `com.azure:azure-core-http-netty` from 1.15.11 to 1.15.12 ([#18586](https://github.com/opensearch-project/OpenSearch/pull/18586)) +- Bump `com.squareup.okio:okio` from 3.13.0 to 3.15.0 ([#18645](https://github.com/opensearch-project/OpenSearch/pull/18645), [#18689](https://github.com/opensearch-project/OpenSearch/pull/18689)) +- Bump `com.netflix.nebula.ospackage-base` from 11.11.2 to 12.0.0 ([#18646](https://github.com/opensearch-project/OpenSearch/pull/18646)) +- Bump `com.azure:azure-storage-blob` from 
12.30.0 to 12.30.1 ([#18644](https://github.com/opensearch-project/OpenSearch/pull/18644)) +- Bump `com.google.guava:failureaccess` from 1.0.1 to 1.0.2 ([#18672](https://github.com/opensearch-project/OpenSearch/pull/18672)) +- Bump `io.perfmark:perfmark-api` from 0.26.0 to 0.27.0 ([#18672](https://github.com/opensearch-project/OpenSearch/pull/18672)) +- Bump `org.bouncycastle:bctls-fips` from 2.0.19 to 2.0.20 ([#18668](https://github.com/opensearch-project/OpenSearch/pull/18668)) +- Bump `org.bouncycastle:bcpkix-fips` from 2.0.7 to 2.0.8 ([#18668](https://github.com/opensearch-project/OpenSearch/pull/18668)) +- Bump `org.bouncycastle:bcpg-fips` from 2.0.10 to 2.0.11 ([#18668](https://github.com/opensearch-project/OpenSearch/pull/18668)) +- Bump `com.password4j:password4j` from 1.8.2 to 1.8.3 ([#18668](https://github.com/opensearch-project/OpenSearch/pull/18668)) +- Bump `com.azure:azure-core` from 1.55.3 to 1.55.5 ([#18691](https://github.com/opensearch-project/OpenSearch/pull/18691)) +- Bump `com.google.jimfs:jimfs` from 1.3.0 to 1.3.1 ([#18743](https://github.com/opensearch-project/OpenSearch/pull/18743)) +- Bump `com.azure:azure-storage-common` from 12.29.0 to 12.29.1 ([#18742](https://github.com/opensearch-project/OpenSearch/pull/18742)) +- Bump `org.apache.commons:commons-lang3` from 3.17.0 to 3.18.0 ([#18745](https://github.com/opensearch-project/OpenSearch/pull/18745)) ### Deprecated @@ -33,10 +63,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Add task cancellation checks in aggregators ([#18426](https://github.com/opensearch-project/OpenSearch/pull/18426)) - Fix concurrent timings in profiler ([#18540](https://github.com/opensearch-project/OpenSearch/pull/18540)) +- Fix regex query from query string query to work with field alias ([#18215](https://github.com/opensearch-project/OpenSearch/issues/18215)) - [Autotagging] Fix delete rule event consumption in InMemoryRuleProcessingService ([#18628](https://github.com/opensearch-project/OpenSearch/pull/18628)) - Cannot communicate with HTTP/2 when reactor-netty is enabled ([#18599](https://github.com/opensearch-project/OpenSearch/pull/18599)) - Fix the visit of sub queries for HasParentQuery and HasChildQuery ([#18621](https://github.com/opensearch-project/OpenSearch/pull/18621)) - +- Fix the backward compatibility regression with COMPLEMENT for Regexp queries introduced in OpenSearch 3.0 ([#18640](https://github.com/opensearch-project/OpenSearch/pull/18640)) +- Fix Replication lag computation ([#18602](https://github.com/opensearch-project/OpenSearch/pull/18602)) +- Fix max_score is null when sorting on score firstly ([#18715](https://github.com/opensearch-project/OpenSearch/pull/18715)) +- Fixed Staggered merge - load average replace with AverageTrackers, some Default thresholds modified ([#18666](https://github.com/opensearch-project/OpenSearch/pull/18666)) ### Security diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 016a4f9900402..2d7125b241af7 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -189,7 +189,9 @@ It's typically easier to wait until the console stops scrolling, and then run `c ```bash curl localhost:9200 - +``` +The expected response should be +``` { "name" : "runTask-0", "cluster_name" : "runTask", diff --git a/MAINTAINERS.md b/MAINTAINERS.md index d00dc12621d98..6b18639282efa 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -28,6 +28,7 @@ This document contains a list of maintainers in this repo.
See [opensearch-proje | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | | Pan Guixin | [bugmakerrrrrr](https://github.com/bugmakerrrrrr) | ByteDance | | Peter Nied | [peternied](https://github.com/peternied) | Amazon | +| Prudhvi Godithi | [prudhvigodithi](https://github.com/prudhvigodithi) | Amazon | | Rishabh Maurya | [rishabhmaurya](https://github.com/rishabhmaurya) | Amazon | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | | Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index b88501b0b80cb..3a44aa603378c 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.25.0" + implementation "org.apache.logging.log4j:log4j-core:2.25.1" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 b/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 deleted file mode 100644 index 387635e9e1594..0000000000000 --- a/client/rest/licenses/bctls-fips-2.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cc33650ede63bc1a8281ed5c8e1da314d50bc76 \ No newline at end of file diff --git a/client/rest/licenses/bctls-fips-2.0.20.jar.sha1 b/client/rest/licenses/bctls-fips-2.0.20.jar.sha1 new file mode 100644 index 0000000000000..66cd82b49b537 --- /dev/null +++ b/client/rest/licenses/bctls-fips-2.0.20.jar.sha1 @@ -0,0 +1 @@ +1138f7896e0d1bb0d924bc868ed2dfda4f69470e \ No newline at end of file diff --git a/distribution/build.gradle b/distribution/build.gradle index d6ad3fb96931c..5de6fa0611ea0 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -312,7 +312,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { * Properties to expand when copying packaging files * *****************************************************************************/ configurations { - ['libs', 'libsPluginCli', 'libsKeystoreCli', 'libsUpgradeCli', 'bcFips'].each { + ['libs', 'libsPluginCli', 'libsKeystoreCli', 'bcFips'].each { create(it) { canBeConsumed = false canBeResolved = true @@ -333,7 +333,6 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { libsPluginCli project(':distribution:tools:plugin-cli') libsKeystoreCli project(path: ':distribution:tools:keystore-cli') - libsUpgradeCli project(path: ':distribution:tools:upgrade-cli') bcFips libs.bundles.bouncycastle } @@ -356,9 +355,6 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { into('tools/keystore-cli') { from(configurations.libsKeystoreCli) } - into('tools/upgrade-cli') { - from(configurations.libsUpgradeCli) - } } } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index a02f907f86aef..194c683da5ec7 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.11.2" + id "com.netflix.nebula.ospackage-base" version "12.0.0" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/src/bin/opensearch-upgrade b/distribution/src/bin/opensearch-upgrade deleted file mode 100755 index 5aae184341b6b..0000000000000 --- 
a/distribution/src/bin/opensearch-upgrade +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -set -e -o pipefail - -OPENSEARCH_MAIN_CLASS=org.opensearch.upgrade.UpgradeCli \ - OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/upgrade-cli \ - "`dirname "$0"`"/opensearch-cli \ - "$@" diff --git a/distribution/src/bin/opensearch-upgrade.bat b/distribution/src/bin/opensearch-upgrade.bat deleted file mode 100644 index f44b3750eb9e0..0000000000000 --- a/distribution/src/bin/opensearch-upgrade.bat +++ /dev/null @@ -1,16 +0,0 @@ -@echo off - -setlocal enabledelayedexpansion -setlocal enableextensions - -set OPENSEARCH_MAIN_CLASS=org.opensearch.upgrade.UpgradeCli -set OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/upgrade-cli -call "%~dp0opensearch-cli.bat" ^ - %%* ^ - || goto exit - - -endlocal -endlocal -:exit -exit /b %ERRORLEVEL% diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index a239926ae56f3..8beb17bb8bf9a 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -41,7 +41,7 @@ dependencies { api "org.bouncycastle:bc-fips:${versions.bouncycastle_jce}" api "org.bouncycastle:bcpg-fips:${versions.bouncycastle_pg}" testImplementation project(":test:framework") - testImplementation 'com.google.jimfs:jimfs:1.3.0' + testImplementation 'com.google.jimfs:jimfs:1.3.1' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false } diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 deleted file mode 100644 index c7aa41c7996d8..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f21aff3416359ad20b2712c0727696858a2e769a \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.11.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.11.jar.sha1 new file mode 100644 index 0000000000000..39805c3a32614 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.11.jar.sha1 @@ -0,0 +1 @@ +19f38a0d8048e87039b1bb6c1ba4d2b104891d04 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java index 7fcdab907cbf0..36ef7ec253bc2 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java @@ -33,7 +33,6 @@ package org.opensearch.tools.cli.plugin; import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.MockTerminal; @@ -271,7 +270,7 @@ public void testExistingIncompatiblePlugin() throws Exception { "version", "1.0", "opensearch.version", - LegacyESVersion.fromString("5.0.0").toString(), + Version.fromString("5.0.0").toString(), "java.version", System.getProperty("java.specification.version"), "classname", diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle deleted file mode 100644 index 92c043132c021..0000000000000 --- a/distribution/tools/upgrade-cli/build.gradle +++ /dev/null @@ -1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * 
- * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - */ - -apply plugin: 'opensearch.build' - -base { - archivesName = 'opensearch-upgrade-cli' -} - -dependencies { - compileOnly project(":server") - compileOnly project(":libs:opensearch-cli") - implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - testImplementation project(":test:framework") - testImplementation 'com.google.jimfs:jimfs:1.3.0' - testRuntimeOnly("com.google.guava:guava:${versions.guava}") { - transitive = false - } -} - -tasks.named("dependencyLicenses").configure { - mapping from: /jackson-.*/, to: 'jackson' -} - -test { - systemProperty 'tests.security.manager', 'false' -} diff --git a/distribution/tools/upgrade-cli/licenses/jackson-LICENSE b/distribution/tools/upgrade-cli/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/distribution/tools/upgrade-cli/licenses/jackson-NOTICE b/distribution/tools/upgrade-cli/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. 
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.18.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.18.2.jar.sha1 deleted file mode 100644 index a06e1d5f28425..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.18.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -985d77751ebc7fce5db115a986bc9aa82f973f4a \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.18.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.18.2.jar.sha1 deleted file mode 100644 index eedbfff66c705..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.18.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -deef8697b92141fb6caf7aa86966cff4eec9b04f \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java deleted file mode 100644 index de2d24dc61ce1..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import org.opensearch.Version; -import org.opensearch.cli.Terminal; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; - -import java.io.File; -import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.URL; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Scanner; - -/** - * Looks for an existing elasticsearch installation. First it tries to identify automatically, - * and if unsuccessful, asks the user to input the missing details. - *

- * If an elasticsearch installation can not be found, throws a runtime error which fails the - * upgrade task. - */ -class DetectEsInstallationTask implements UpgradeTask { - private static final int ES_DEFAULT_PORT = 9200; - private static final String ES_CONFIG_ENV = "ES_PATH_CONF"; - private static final String ES_CONFIG_YML = "elasticsearch.yml"; - private static final String ES_HOME = "ES_HOME"; - - @SuppressForbidden(reason = "We need to read external es config files") - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - try { - terminal.println("Looking for an elasticsearch installation ..."); - String esHomeEnv = System.getenv(ES_HOME); - if (esHomeEnv == null) { - esHomeEnv = terminal.readText("Missing ES_HOME env variable, enter the path to elasticsearch home: "); - if (esHomeEnv == null || esHomeEnv.isEmpty()) { - throw new RuntimeException("Invalid input for path to elasticsearch home directory."); - } - } - taskInput.setEsHome(new File(esHomeEnv).toPath()); - - String esConfEnv = System.getenv(ES_CONFIG_ENV); - if (esConfEnv == null) { - esConfEnv = terminal.readText("Missing ES_PATH_CONF env variable, enter the path to elasticsearch config directory: "); - if (esConfEnv == null || esHomeEnv.isEmpty()) { - throw new RuntimeException("Invalid input for path to elasticsearch config directory."); - } - } - taskInput.setEsConfig(new File(esConfEnv).toPath()); - - final Settings esSettings = Settings.builder().loadFromPath(taskInput.getEsConfig().resolve(ES_CONFIG_YML)).build(); - final String url = retrieveUrl(esSettings); - taskInput.setBaseUrl(url); - final boolean running = isRunning(url); - taskInput.setRunning(running); - if (running) { - terminal.println("Found a running instance of elasticsearch at " + url); - taskInput.setRunning(true); - try { - updateTaskInput(taskInput, fetchInfoFromUrl(taskInput.getBaseUrl())); - } catch (RuntimeException e) { - updateTaskInput(taskInput, fetchInfoFromEsSettings(esSettings)); - } - try { - taskInput.setPlugins(fetchPluginsFromUrl(taskInput.getBaseUrl())); - } catch (RuntimeException e) { - taskInput.setPlugins(detectPluginsFromEsHome(taskInput.getEsHome())); - } - } else { - terminal.println("Did not find a running instance of elasticsearch at " + url); - updateTaskInput(taskInput, fetchInfoFromEsSettings(esSettings)); - taskInput.setPlugins(detectPluginsFromEsHome(taskInput.getEsHome())); - } - } catch (IOException e) { - throw new RuntimeException("Error detecting existing elasticsearch installation. 
" + e); - } - } - - @SuppressWarnings("unchecked") - private void updateTaskInput(TaskInput taskInput, Map response) { - final Map versionMap = (Map) response.get("version"); - if (versionMap != null) { - final String vStr = versionMap.get("number"); - if (vStr != null) { - taskInput.setVersion(Version.fromString(vStr)); - } - } - taskInput.setNode((String) response.get("name")); - taskInput.setCluster((String) response.get("cluster_name")); - } - - // package private for unit testing - String retrieveUrl(final Settings esSettings) { - final int port = Optional.ofNullable(esSettings.get("http.port")).map(this::extractPort).orElse(ES_DEFAULT_PORT); - return "http://localhost:" + port; - } - - private Integer extractPort(final String port) { - try { - return Integer.parseInt(port.trim()); - } catch (Exception ex) { - return ES_DEFAULT_PORT; - } - } - - @SuppressForbidden(reason = "Need to connect to http endpoint for elasticsearch.") - private boolean isRunning(final String url) { - try { - final URL esUrl = new URL(url); - final HttpURLConnection conn = (HttpURLConnection) esUrl.openConnection(); - conn.setRequestMethod("GET"); - conn.setConnectTimeout(1000); - conn.connect(); - return conn.getResponseCode() == 200; - } catch (IOException e) { - return false; - } - } - - @SuppressForbidden(reason = "Retrieve information on the installation.") - private Map fetchInfoFromUrl(final String url) { - try { - final URL esUrl = new URL(url); - final HttpURLConnection conn = (HttpURLConnection) esUrl.openConnection(); - conn.setRequestMethod("GET"); - conn.setConnectTimeout(1000); - conn.connect(); - - final StringBuilder json = new StringBuilder(); - final Scanner scanner = new Scanner(esUrl.openStream()); - while (scanner.hasNext()) { - json.append(scanner.nextLine()); - } - scanner.close(); - final ObjectMapper mapper = new ObjectMapper(); - return mapper.readValue(json.toString(), Map.class); - } catch (IOException e) { - throw new RuntimeException("Error retrieving elasticsearch cluster info, " + e); - } - } - - private Map fetchInfoFromEsSettings(final Settings esSettings) throws IOException { - final Map info = new HashMap<>(); - final String node = esSettings.get("node.name") != null ? esSettings.get("node.name") : "unknown"; - final String cluster = esSettings.get("cluster.name") != null ? 
esSettings.get("cluster.name") : "unknown"; - info.put("name", node); - info.put("cluster_name", cluster); - return info; - } - - @SuppressWarnings("unchecked") - @SuppressForbidden(reason = "Retrieve information on installed plugins.") - private List fetchPluginsFromUrl(final String url) { - final List plugins = new ArrayList<>(); - try { - final URL esUrl = new URL(url + "/_cat/plugins?format=json&local=true"); - final HttpURLConnection conn = (HttpURLConnection) esUrl.openConnection(); - conn.setRequestMethod("GET"); - conn.setConnectTimeout(1000); - conn.connect(); - if (conn.getResponseCode() == 200) { - final StringBuilder json = new StringBuilder(); - final Scanner scanner = new Scanner(esUrl.openStream()); - while (scanner.hasNext()) { - json.append(scanner.nextLine()); - } - scanner.close(); - final ObjectMapper mapper = new ObjectMapper(); - final Map[] response = mapper.readValue(json.toString(), Map[].class); - for (Map plugin : response) { - plugins.add(plugin.get("component")); - } - } - return plugins; - } catch (IOException e) { - throw new RuntimeException("Error retrieving elasticsearch plugin details, " + e); - } - } - - private List detectPluginsFromEsHome(final Path esHome) { - // list out the contents of the plugins directory under esHome - return Collections.emptyList(); - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java deleted file mode 100644 index fcb23f02425c6..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Imports JVM options from an existing elasticsearch installation. - */ -class ImportJvmOptionsTask implements UpgradeTask { - private static final String JVM_OPTIONS_D = "jvm.options.d"; - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - try { - terminal.println("Importing JVM options ..."); - final Path jvmOptionsDir = taskInput.getOpenSearchConfig().resolve(JVM_OPTIONS_D); - if (!Files.exists(jvmOptionsDir)) { - Files.createDirectory(jvmOptionsDir); - } - - final Path esJvmOptionsDir = taskInput.getEsConfig().resolve(JVM_OPTIONS_D); - if (Files.exists(esJvmOptionsDir) && Files.isDirectory(esJvmOptionsDir)) { - final List esJvmOptionsFiles = Files.list(esJvmOptionsDir).collect(Collectors.toList()); - for (Path esJvmOptFile : esJvmOptionsFiles) { - final Path jvmOptFile = jvmOptionsDir.resolve(esJvmOptFile.getFileName().toString()); - Files.copy(esJvmOptFile, jvmOptFile, StandardCopyOption.REPLACE_EXISTING); - } - } - terminal.println("Success!" + System.lineSeparator()); - } catch (Exception e) { - throw new RuntimeException("Error importing JVM options. 
" + e); - } - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java deleted file mode 100644 index f2f36ab3bbe0e..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.KeyStoreWrapper; -import org.opensearch.core.common.settings.SecureString; - -import java.io.InputStream; - -/** - * Imports the secure Keystore settings from an existing elasticsearch installation. - */ -class ImportKeystoreTask implements UpgradeTask { - private static final String OPENSEARCH_KEYSTORE_FILENAME = "opensearch.keystore"; - private static final String ES_KEYSTORE_FILENAME = "elasticsearch.keystore"; - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - SecureString keyStorePassword = new SecureString(new char[0]); - try { - terminal.println("Importing keystore settings ..."); - final KeyStoreWrapper esKeystore = KeyStoreWrapper.load(taskInput.getEsConfig(), ES_KEYSTORE_FILENAME); - if (esKeystore == null) { - terminal.println("No elasticsearch keystore settings to import."); - return; - } - KeyStoreWrapper openSearchKeystore = KeyStoreWrapper.load( - taskInput.getOpenSearchConfig().resolve(OPENSEARCH_KEYSTORE_FILENAME) - ); - if (openSearchKeystore == null) { - openSearchKeystore = KeyStoreWrapper.create(); - } - if (esKeystore.hasPassword()) { - final char[] passwordArray = terminal.readSecret("Enter password for the elasticsearch keystore : "); - keyStorePassword = new SecureString(passwordArray); - } - esKeystore.decrypt(keyStorePassword.getChars()); - for (String setting : esKeystore.getSettingNames()) { - if (setting.equals("keystore.seed")) { - continue; - } - if (!openSearchKeystore.getSettingNames().contains(setting)) { - InputStream settingIS = esKeystore.getFile(setting); - byte[] bytes = new byte[settingIS.available()]; - settingIS.read(bytes); - KeystoreWrapperUtil.saveSetting(openSearchKeystore, setting, bytes); - } - } - openSearchKeystore.save(taskInput.getOpenSearchConfig(), keyStorePassword.getChars()); - terminal.println("Success!" + System.lineSeparator()); - } catch (Exception e) { - throw new RuntimeException("Error importing keystore settings from elasticsearch, " + e); - } finally { - keyStorePassword.close(); - } - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java deleted file mode 100644 index da999a3f91f08..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.nio.file.StandardOpenOption; -import java.util.Map; -import java.util.Properties; - -/** - * Imports Log4j properties from an existing elasticsearch installation. - */ -class ImportLog4jPropertiesTask implements UpgradeTask { - static final String LOG4J_PROPERTIES = "log4j2.properties"; - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - try { - terminal.println("Importing log4j.properties ..."); - final Path log4jPropPath = taskInput.getOpenSearchConfig().resolve(LOG4J_PROPERTIES); - if (Files.exists(log4jPropPath)) { - Files.copy( - log4jPropPath, - taskInput.getOpenSearchConfig().resolve(LOG4J_PROPERTIES + ".bkp"), - StandardCopyOption.REPLACE_EXISTING - ); - } - final Path esLog4jPropPath = taskInput.getEsConfig().resolve(LOG4J_PROPERTIES); - try ( - InputStream esLog4jIs = Files.newInputStream(esLog4jPropPath); - OutputStream log4jOs = Files.newOutputStream(log4jPropPath, StandardOpenOption.TRUNCATE_EXISTING) - ) { - final Properties esLog4JProps = new Properties(); - esLog4JProps.load(esLog4jIs); - final Properties log4jProps = renameValues(esLog4JProps); - - log4jProps.store(log4jOs, "This is an auto-generated file imported from an existing elasticsearch installation."); - } - terminal.println("Success!" + System.lineSeparator()); - } catch (IOException e) { - throw new RuntimeException("Error copying log4j properties. " + e); - } - } - - /** - * Rename the values for OpenSearch log4j properties to reflect the changed names - * for java packages, class names and system variables. - * - * @param esLog4JProps existing elasticsearch log4j properties. - * @return updated properties for OpenSearch. - */ - private Properties renameValues(Properties esLog4JProps) { - final Properties props = new Properties(); - for (Map.Entry entry : esLog4JProps.entrySet()) { - final String key = (String) entry.getKey(); - final String value = (String) entry.getValue(); - final String newKey = key.replaceAll("esmessagefields", "opensearchmessagefields"); - final String newValue = value.replaceAll("ESJsonLayout", "OpenSearchJsonLayout") - .replaceAll("sys:es.logs", "sys:opensearch.logs") - .replaceAll("org.elasticsearch", "org.opensearch"); - props.setProperty(newKey, newValue); - } - return props; - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java deleted file mode 100644 index 2ba8f93cd53ac..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.xcontent.yaml.YamlXContent; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.nio.file.StandardOpenOption; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * Imports settings from an existing elasticsearch installation. - */ -class ImportYmlConfigTask implements UpgradeTask { - private static final String ES_CONFIG_FILENAME = "elasticsearch.yml"; - private static final String OPENSEARCH_CONFIG_FILENAME = "opensearch.yml"; - static final String HEADER = "# ======================== OpenSearch Configuration =========================\n" - + "# NOTE: The settings in this file are imported from an existing Elasticsearch\n" - + "# installation using the opensearch-upgrade tool. The original file is\n" - + "# backed up in this directory as opensearch.yml.bkp for reference.\n\n" - + "# Please consult the documentation for further information:\n" - + "# https://www.opensearch.org\n" - + "#\n"; - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - try { - terminal.println("Importing settings from elasticsearch.yml ..."); - final Path openSearchYmlPath = taskInput.getOpenSearchConfig().resolve(OPENSEARCH_CONFIG_FILENAME); - final Path esYamlPath = taskInput.getEsConfig().resolve(ES_CONFIG_FILENAME); - final Settings esSettings = Settings.builder().loadFromPath(esYamlPath).build(); - final Settings settings = Settings.builder().loadFromPath(openSearchYmlPath).build(); - if (esSettings.size() > 0) { - if (settings.size() > 0 - && terminal.promptYesNo("Existing settings in opensearch.yml will be overwritten, proceed?", false) == false) { - terminal.println("Import settings cancelled by user"); - } - final Path backupYmlPath = taskInput.getOpenSearchConfig().resolve(OPENSEARCH_CONFIG_FILENAME + ".bkp"); - if (!Files.exists(backupYmlPath) - || terminal.promptYesNo("A backup file for opensearch.yml already exists, overwrite?", false)) { - Files.copy(openSearchYmlPath, backupYmlPath, StandardCopyOption.REPLACE_EXISTING); - } - Files.write(openSearchYmlPath, Collections.singleton(HEADER), StandardOpenOption.TRUNCATE_EXISTING); - final Settings mergeSettings = mergeSettings(settings, esSettings); - writeSettings(openSearchYmlPath, mergeSettings); - } - terminal.println("Success!" + System.lineSeparator()); - } catch (IOException ex) { - throw new RuntimeException("Error importing settings from elasticsearch.yml, " + ex); - } - } - - // package private for unit testing - Settings mergeSettings(final Settings first, final Settings second) { - Settings.Builder builder = Settings.builder(); - for (String key : first.keySet()) { - builder.copy(key, key, first); - } - for (String key : second.keySet()) { - builder.copy(key, key, second); - } - return builder.build(); - } - - /** - * Write settings to the config file on the file system. It uses the {@link XContentBuilder} - * to build the YAML content and write it to the output stream. 
- * - * @param configYml path to a yml file where config will be written to. - * @param settings the settings to write - * @throws IOException exception during writing to the output stream. - */ - private void writeSettings(final Path configYml, final Settings settings) throws IOException { - try ( - OutputStream os = Files.newOutputStream(configYml, StandardOpenOption.APPEND); - XContentBuilder builder = new XContentBuilder(YamlXContent.yamlXContent, os) - ) { - builder.startObject(); - final Map params = new HashMap<>(); - params.put("flat_settings", "true"); - settings.toXContent(builder, new ToXContent.MapParams(params)); - builder.endObject(); - builder.flush(); - } catch (Exception e) { - throw new SettingsException("Failed to write settings to " + configYml.toString(), e); - } - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java deleted file mode 100644 index ae2335752bed0..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Set; - -/** - * Installs the list of plugins using the opensearch-plugin command. -*/ -class InstallPluginsTask implements UpgradeTask { - private static final String ERROR_MSG = "Error installing plugin %s. Please install it manually."; - - /** The list of official plugins that can be installed by the upgrade tool. */ - static final Set OFFICIAL_PLUGINS; - static { - try ( - InputStream stream = InstallPluginsTask.class.getResourceAsStream("/plugins.txt"); - BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)) - ) { - Set plugins = new HashSet<>(); - String line = reader.readLine(); - while (line != null) { - plugins.add(line.trim()); - line = reader.readLine(); - } - OFFICIAL_PLUGINS = Collections.unmodifiableSet(plugins); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - if (taskInput.getPlugins() == null || taskInput.getPlugins().isEmpty()) { - return; - } - terminal.println("Installing core plugins ..."); - List manualPlugins = new ArrayList<>(); - - for (String plugin : taskInput.getPlugins()) { - if (OFFICIAL_PLUGINS.contains(plugin)) { - executeInstallPluginCommand(plugin, taskInput, terminal); - } else { - manualPlugins.add(plugin); - } - } - if (!manualPlugins.isEmpty()) { - terminal.println("Please install the following custom plugins manually: " + manualPlugins); - } - terminal.println("Success!" 
+ System.lineSeparator()); - } - - // package private for unit testing - void executeInstallPluginCommand(String plugin, TaskInput taskInput, Terminal terminal) { - ProcessBuilder processBuilder = getProcessBuilderBasedOnOS(plugin, taskInput); - try { - final Process process = processBuilder.inheritIO().start(); - if (process.waitFor() != 0) { - terminal.errorPrint(Terminal.Verbosity.NORMAL, String.format(Locale.getDefault(), ERROR_MSG, plugin)); - } - } catch (IOException | InterruptedException e) { - terminal.errorPrint(Terminal.Verbosity.NORMAL, String.format(Locale.getDefault(), ERROR_MSG, plugin) + e.getMessage()); - } - } - - // package private for unit testing - ProcessBuilder getProcessBuilderBasedOnOS(String plugin, TaskInput taskInput) { - final String command = taskInput.getOpenSearchBin().resolve("opensearch-plugin") + " install " + plugin; - final ProcessBuilder processBuilder = new ProcessBuilder(); - if (OS.WINDOWS == OS.current()) { - processBuilder.command("cmd.exe", "/c", command); - } else { - processBuilder.command("sh", "-c", command); - } - return processBuilder; - } - - private enum OS { - WINDOWS, - MAC, - LINUX; - - public static OS current() { - final String os = System.getProperty("os.name", ""); - if (os.startsWith("Windows")) { - return OS.WINDOWS; - } - if (os.startsWith("Linux") || os.startsWith("LINUX")) { - return OS.LINUX; - } - if (os.startsWith("Mac")) { - return OS.MAC; - } - throw new IllegalStateException("Can't determine OS from: " + os); - } - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java deleted file mode 100644 index 3ef7d09edc046..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.common.settings.KeyStoreWrapper; - -/** - * Utility that has package level access to the {@link KeyStoreWrapper} for - * saving a setting. - */ -public final class KeystoreWrapperUtil { - /** - * No public constructor. Contains only static functions. - */ - private KeystoreWrapperUtil() {} - - /** - * Save a secure setting using the wrapper. - * - * @param keystore an instance of {@link KeyStoreWrapper} - * @param setting setting to save - * @param bytes value of the setting in bytes - */ - public static void saveSetting(KeyStoreWrapper keystore, String setting, byte[] bytes) { - keystore.setFile(setting, bytes); - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java deleted file mode 100644 index b3c5604275b54..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.Version; -import org.opensearch.env.Environment; - -import java.nio.file.Path; -import java.util.List; -import java.util.Optional; - -/** - * A plain old java object, that contains the information used by tasks - * in the upgrade process. - */ -class TaskInput { - private final Environment openSearchEnv; - private String node; - private String cluster; - private String baseUrl; - private boolean running; - private Version version; - private List plugins; - private Path esHome; - private Path esConfig; - - TaskInput(Environment openSearchEnv) { - this.openSearchEnv = openSearchEnv; - } - - public String getNode() { - return node; - } - - public void setNode(String node) { - this.node = node; - } - - public String getCluster() { - return cluster; - } - - public void setCluster(String cluster) { - this.cluster = cluster; - } - - public Optional getVersion() { - return Optional.ofNullable(version); - } - - public void setVersion(Version version) { - this.version = version; - } - - public List getPlugins() { - return plugins; - } - - public void setPlugins(List plugins) { - this.plugins = plugins; - } - - public Path getEsConfig() { - return esConfig; - } - - public void setEsConfig(Path esConfig) { - this.esConfig = esConfig; - } - - public Path getEsHome() { - return esHome; - } - - public void setEsHome(Path esHome) { - this.esHome = esHome; - } - - public Path getOpenSearchConfig() { - return openSearchEnv.configDir(); - } - - public Path getOpenSearchBin() { - return openSearchEnv.binDir(); - } - - public boolean isRunning() { - return running; - } - - public void setRunning(boolean running) { - this.running = running; - } - - public String getBaseUrl() { - return baseUrl; - } - - public void setBaseUrl(String baseUrl) { - this.baseUrl = baseUrl; - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java deleted file mode 100644 index f609d06b8ed34..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import joptsimple.OptionSet; -import org.opensearch.cli.ExitCodes; -import org.opensearch.cli.Terminal; -import org.opensearch.cli.UserException; -import org.opensearch.common.cli.EnvironmentAwareCommand; -import org.opensearch.common.collect.Tuple; -import org.opensearch.env.Environment; - -/** - * This class extends the existing opensearch-cli and provides the entry - * point for the opensearch-upgrade tool. - *

- * This class is agnostic of the actual logic which performs the upgrade - * on the node. - */ -public class UpgradeCli extends EnvironmentAwareCommand { - - /** - * Constructor to create an instance of UpgradeCli. - */ - public UpgradeCli() { - super("A CLI tool for upgrading to OpenSearch 1.x from a supported Elasticsearch version."); - } - - /** - * Entrypoint for the upgrade tool. - * - * @param args args to main. - * @throws Exception exception thrown during the execution of the UpgradeCli. - */ - public static void main(String[] args) throws Exception { - exit(new UpgradeCli().main(args, Terminal.DEFAULT)); - } - - /** - * Executes the upgrade task. This retrieves an instance of {@link UpgradeTask} which is composed - * of smaller individual tasks that perform specific operations as part of the overall process. - * - * @param terminal current terminal the command is running - * @param options options supplied to the command - * @param env current environment in which this cli tool is running. - * @throws UserException if any exception is thrown from the tasks - */ - @Override - protected void execute(final Terminal terminal, final OptionSet options, final Environment env) throws UserException { - try { - final Tuple input = new Tuple<>(new TaskInput(env), terminal); - UpgradeTask.getTask().accept(input); - terminal.println("Done!"); - terminal.println("Next Steps: "); - terminal.println(" Stop the running elasticsearch on this node."); - terminal.println(" Start OpenSearch on this node."); - } catch (RuntimeException ex) { - throw new UserException(ExitCodes.DATA_ERROR, ex.getMessage()); - } - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java deleted file mode 100644 index 8f84dd8c9817c..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; - -import java.util.function.Consumer; - -/** - * An interface for an upgrade task, which in this instance is an unit of - * operation that is part of the overall upgrade process. This extends the - * {@link java.util.function.Consumer} interface. - *

- * The implementing tasks consume and instance of a tuple of {@link TaskInput} - * and {@link Terminal} and operate via side effects. - * - */ -interface UpgradeTask extends Consumer> { - /** - * Composes the individual tasks to create a pipeline for the overall upgrade task. - * - * @return an instance of {@link java.util.function.Consumer} that takes a tuple of - * task input and the current terminal. The composed task fails if any of the - * individual tasks fails. - */ - static Consumer> getTask() { - return new DetectEsInstallationTask().andThen(new ValidateInputTask()) - .andThen(new ImportYmlConfigTask()) - .andThen(new ImportJvmOptionsTask()) - .andThen(new ImportLog4jPropertiesTask()) - .andThen(new InstallPluginsTask()) - .andThen(new ImportKeystoreTask()); - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java deleted file mode 100644 index bea524e651827..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; -import org.opensearch.cli.Terminal; -import org.opensearch.common.collect.Tuple; - -import java.util.LinkedHashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Validates the input {@link TaskInput} for the upgrade. - */ -class ValidateInputTask implements UpgradeTask { - - @Override - public void accept(final Tuple input) { - final TaskInput taskInput = input.v1(); - final Terminal terminal = input.v2(); - - terminal.println("Verifying the details ..."); - // check if the elasticsearch version is supported - if (taskInput.getVersion().isPresent()) { - final Version version = taskInput.getVersion().get(); - if (version.equals(LegacyESVersion.fromId(7100299)) == false) { - throw new RuntimeException( - String.format(Locale.getDefault(), "The installed version %s of elasticsearch is not supported.", version) - ); - } - } else { - terminal.println("Unable to detect installed elasticsearch version."); - confirmToProceed(terminal); - } - // check if the OpenSearch config is set to an external location - if (taskInput.getOpenSearchConfig().getParent().equals(taskInput.getOpenSearchBin().getParent())) { - terminal.println( - "OpenSearch config directory is set inside the installation directory. " - + "It is recommended to use an external config directory and set the environment variable " - + "OPENSEARCH_PATH_CONF to it." - ); - confirmToProceed(terminal); - } - - // print summary and confirm with user if everything looks correct. 
- final Map fieldsMap = getSummaryFieldsMap(taskInput); - final String format = " %-25s | %s"; - terminal.println("+----------------------- SUMMARY -----------------------+"); - for (Map.Entry entry : fieldsMap.entrySet()) { - terminal.println(String.format(Locale.getDefault(), format, entry.getKey(), entry.getValue())); - } - terminal.println("+-------------------------------------------------------+"); - terminal.println("Please verify if everything above looks good."); - confirmToProceed(terminal); - } - - private void confirmToProceed(final Terminal terminal) { - terminal.println(System.lineSeparator()); - if (terminal.promptYesNo("Do you want to proceed?", false) == false) { - throw new RuntimeException("Upgrade cancelled by user."); - } - } - - // package private for unit testing - Map getSummaryFieldsMap(final TaskInput taskInput) { - final String version = taskInput.getVersion().isPresent() ? taskInput.getVersion().get().toString() : "unknown"; - - final Map fields = new LinkedHashMap<>(); - fields.put("Cluster", taskInput.getCluster()); - fields.put("Node", taskInput.getNode()); - fields.put("Endpoint", taskInput.getBaseUrl()); - fields.put("Elasticsearch Version", version); - fields.put("Elasticsearch Config", taskInput.getEsConfig().toString()); - fields.put("Elasticsearch Plugins", taskInput.getPlugins() == null ? "[]" : taskInput.getPlugins().toString()); - fields.put("OpenSearch Config", taskInput.getOpenSearchConfig().toString()); - - return fields; - } -} diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java deleted file mode 100644 index 86ecef5ae5576..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * This exists to get access to the package level methods of KeyStoreWrapper. - */ -package org.opensearch.tools.cli.upgrade; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java deleted file mode 100644 index e653b3fa8d284..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.MockTerminal; -import org.opensearch.cli.Terminal; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.File; -import java.nio.file.Path; - -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -public class DetectEsInstallationTaskTests extends OpenSearchTestCase { - - private final MockTerminal terminal = new MockTerminal(); - private DetectEsInstallationTask task; - private Environment env; - - @Before - public void setUpTask() { - task = new DetectEsInstallationTask(); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", "").build()); - } - - @SuppressForbidden(reason = "Read config directory from test resources.") - public void testTaskExecution() throws Exception { - Path esConfig = new File(getClass().getResource("/config").getPath()).toPath(); - // path for es_home - terminal.addTextInput(esConfig.getParent().toString()); - // path for es_config - terminal.addTextInput(esConfig.toString()); - TaskInput taskInput = new TaskInput(env); - Tuple input = new Tuple<>(taskInput, terminal); - - task.accept(input); - - assertThat(taskInput.getEsConfig(), is(esConfig)); - assertThat(taskInput.getBaseUrl(), is("http://localhost:42123")); - assertThat(taskInput.getPlugins(), hasSize(0)); - assertThat(taskInput.getNode(), is("node-x")); - assertThat(taskInput.getCluster(), is("my-cluster")); - } - - public void testRetrieveUrlFromSettings() { - Settings esSettings = Settings.builder().put("http.port", "9201").build(); - - assertThat(task.retrieveUrl(esSettings), is("http://localhost:9201")); - } - - public void testRetrieveDefaultUrlFromConfig() { - assertThat(task.retrieveUrl(Settings.EMPTY), is("http://localhost:9200")); - } -} diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java deleted file mode 100644 index 8136504b15462..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import com.google.common.jimfs.Configuration; -import com.google.common.jimfs.Jimfs; -import org.opensearch.cli.MockTerminal; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.io.PathUtilsForTesting; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; - -public class ImportLog4jPropertiesTaskTests extends OpenSearchTestCase { - private final MockTerminal terminal = new MockTerminal(); - private final List fileSystems = new ArrayList<>(); - private ImportLog4jPropertiesTask task; - private Environment env; - - @Before - public void setUpTask() throws IOException { - task = new ImportLog4jPropertiesTask(); - final Configuration configuration; - configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); - FileSystem fs = Jimfs.newFileSystem(configuration); - fileSystems.add(fs); - PathUtilsForTesting.installMock(fs); - Path home = fs.getPath("test-home"); - Path config = home.resolve("config"); - Files.createDirectories(config); - Files.createFile(config.resolve(ImportLog4jPropertiesTask.LOG4J_PROPERTIES)); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - } - - @SuppressForbidden(reason = "Read config directory from test resources.") - public void testImportLog4jPropertiesTask() throws IOException { - TaskInput taskInput = new TaskInput(env); - Path esConfig = new File(getClass().getResource("/config").getPath()).toPath(); - taskInput.setEsConfig(esConfig); - task.accept(new Tuple<>(taskInput, terminal)); - - Properties properties = new Properties(); - properties.load(Files.newInputStream(taskInput.getOpenSearchConfig().resolve(ImportLog4jPropertiesTask.LOG4J_PROPERTIES))); - assertThat(properties, is(notNullValue())); - assertThat(properties.entrySet(), hasSize(165)); - assertThat(properties.get("appender.rolling.layout.type"), equalTo("OpenSearchJsonLayout")); - assertThat( - properties.get("appender.deprecation_rolling.fileName"), - equalTo("${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_deprecation.json") - ); - assertThat(properties.get("logger.deprecation.name"), equalTo("org.opensearch.deprecation")); - assertThat(properties.keySet(), not(hasItem("appender.deprecation_rolling.layout.esmessagefields"))); - assertThat(properties.keySet(), hasItem("appender.deprecation_rolling.layout.opensearchmessagefields")); - } -} diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java deleted file mode 100644 index 38dfebce7f21b..0000000000000 --- 
a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import com.google.common.jimfs.Configuration; -import com.google.common.jimfs.Jimfs; -import org.opensearch.cli.MockTerminal; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.io.PathUtilsForTesting; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.is; - -public class ImportYmlConfigTaskTests extends OpenSearchTestCase { - private final MockTerminal terminal = new MockTerminal(); - private final List fileSystems = new ArrayList<>(); - private ImportYmlConfigTask task; - private Environment env; - - @Before - public void setUpTask() throws IOException { - task = new ImportYmlConfigTask(); - final Configuration configuration; - configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); - FileSystem fs = Jimfs.newFileSystem(configuration); - fileSystems.add(fs); - PathUtilsForTesting.installMock(fs); - Path home = fs.getPath("test-home"); - Path config = home.resolve("config"); - Files.createDirectories(config); - Files.createFile(config.resolve("opensearch.yml")); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - } - - @SuppressForbidden(reason = "Read config directory from test resources.") - public void testImportYmlConfigTask() throws IOException { - TaskInput taskInput = new TaskInput(env); - Path esConfig = new File(getClass().getResource("/config").getPath()).toPath(); - taskInput.setEsConfig(esConfig); - task.accept(new Tuple<>(taskInput, terminal)); - Settings settings = Settings.builder().loadFromPath(taskInput.getOpenSearchConfig().resolve("opensearch.yml")).build(); - assertThat(settings.keySet(), contains("cluster.name", "http.port", "node.name", "path.data", "path.logs")); - assertThat(settings.get("cluster.name"), is("my-cluster")); - assertThat(settings.get("http.port"), is("42123")); - assertThat(settings.get("node.name"), is("node-x")); - assertThat(settings.get("path.data"), is("[/mnt/data_1, /mnt/data_2]")); - assertThat(settings.get("path.logs"), is("/var/log/eslogs")); - } - - public void testMergeSettings() { - Settings first = Settings.builder().put("setting_one", "value_one").build(); - Settings second = Settings.builder().put("setting_two", "value_two").build(); - Settings merged = task.mergeSettings(first, second); - assertThat(merged.keySet(), contains("setting_one", "setting_two")); - assertThat(merged.get("setting_one"), is("value_one")); - assertThat(merged.get("setting_two"), is("value_two")); - } -} diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java 
b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java deleted file mode 100644 index e7ac68331a8ca..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.cli.MockTerminal; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.mockito.Mockito; - -import static org.hamcrest.Matchers.containsString; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; - -public class InstallPluginsTaskTests extends OpenSearchTestCase { - - private final MockTerminal terminal = new MockTerminal(); - private InstallPluginsTask task; - private Environment env; - - private static final String OFFICIAL_PLUGIN = "analysis-icu"; - private static final String CUSTOM_PLUGIN = "job-scheduler"; - - @Before - public void setUpTask() throws IOException { - task = new InstallPluginsTask(); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", "").build()); - } - - public void testInstallPluginsTaskWithOfficialPlugin() throws IOException { - InstallPluginsTask spyTask = spy(task); - TaskInput taskInput = createTaskInputWithPlugin(OFFICIAL_PLUGIN); - spyTask.accept(new Tuple<>(taskInput, terminal)); - - verify(spyTask, Mockito.atLeast(1)).executeInstallPluginCommand(OFFICIAL_PLUGIN, taskInput, terminal); - } - - public void testInstallPluginsTaskWithCustomPlugin() throws IOException { - TaskInput taskInput = createTaskInputWithPlugin(CUSTOM_PLUGIN); - task.accept(new Tuple<>(taskInput, terminal)); - - assertThat(terminal.getOutput(), containsString("Please install the following custom plugins manually")); - } - - public void testGetCommandsBasedOnOS() { - TaskInput taskInput = createTaskInputWithPlugin(OFFICIAL_PLUGIN); - List commandsList = task.getProcessBuilderBasedOnOS(OFFICIAL_PLUGIN, taskInput).command(); - - final String os = System.getProperty("os.name", ""); - if (os.startsWith("Windows")) { - assertEquals("cmd.exe", commandsList.get(0)); - } else { - assertEquals("sh", commandsList.get(0)); - } - } - - private TaskInput createTaskInputWithPlugin(String plugin) { - TaskInput taskInput = new TaskInput(env); - List pluginsList = new ArrayList<>(); - pluginsList.add(plugin); - taskInput.setPlugins(pluginsList); - return taskInput; - } -} diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java deleted file mode 100644 index c1d1ca55ca315..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import com.google.common.jimfs.Configuration; -import com.google.common.jimfs.Jimfs; -import org.opensearch.cli.Command; -import org.opensearch.cli.CommandTestCase; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.io.PathUtilsForTesting; -import org.opensearch.common.settings.KeyStoreWrapper; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.junit.After; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.GeneralSecurityException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.is; - -public class UpgradeCliTests extends CommandTestCase { - private final List fileSystems = new ArrayList<>(); - private Environment env; - - @Before - public void setupEnv() throws IOException { - final Configuration configuration; - configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); - FileSystem fs = Jimfs.newFileSystem(configuration); - fileSystems.add(fs); - PathUtilsForTesting.installMock(fs); - Path home = fs.getPath("test-home"); - Path config = home.resolve("config"); - Files.createDirectories(config); - Files.createFile(config.resolve("opensearch.yml")); - Files.createDirectory(config.resolve("jvm.options.d")); - Files.createFile(config.resolve("log4j2.properties")); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - } - - @After - public void closeMockFileSystems() throws IOException { - IOUtils.close(fileSystems); - } - - @Override - protected Command newCommand() { - return new UpgradeCli() { - @Override - protected Environment createEnv(Map settings) { - return env; - } - }; - } - - @SuppressForbidden(reason = "Read config directory from test resources.") - public void testUpgrade() throws Exception { - String passwd = "keystorepassword"; - - Path esConfig = new File(getClass().getResource("/config").getPath()).toPath(); - // path for es_home - terminal.addTextInput(esConfig.getParent().toString()); - // path for es_config - terminal.addTextInput(esConfig.toString()); - // input for prompt 'config directory is inside installation' - terminal.addTextInput("y"); - // input for prompt 'es version not detected' - terminal.addTextInput("y"); - // input for prompt 'confirm the details look good' - terminal.addTextInput("y"); - // as the keystore is password protected, we set it. 
- terminal.addSecretInput(passwd); - - execute(); - - assertYmlConfigImported(); - assertKeystoreImported(passwd); - assertJvmOptionsImported(); - assertLog4jPropertiesImported(); - } - - private void assertYmlConfigImported() throws IOException { - String[] headers = ImportYmlConfigTask.HEADER.split("[\\r\\n]+"); - List expectedSettings = new ArrayList<>(); - expectedSettings.addAll(Arrays.asList(headers)); - // this is the generated flat settings - expectedSettings.addAll( - Arrays.asList( - "---", - "cluster.name: \"my-cluster\"", - "http.port: \"42123\"", - "node.name: \"node-x\"", - "path.data:", - "- \"/mnt/data_1\"", - "- \"/mnt/data_2\"", - "path.logs: \"/var/log/eslogs\"" - ) - ); - List actualSettings = Files.readAllLines(env.configDir().resolve("opensearch.yml")) - .stream() - .filter(Objects::nonNull) - .filter(line -> !line.isEmpty()) - .collect(Collectors.toList()); - - assertThat(actualSettings, equalTo(expectedSettings)); - } - - private void assertKeystoreImported(String passwd) throws IOException, GeneralSecurityException { - // assert keystore is created - KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configDir()); - assertNotNull(keystore); - - // assert all keystore settings are imported - keystore.decrypt(passwd.toCharArray()); - assertThat(keystore.getSettingNames(), hasItems(KeyStoreWrapper.SEED_SETTING.getKey(), "test.setting.key", "test.setting.file")); - assertThat(keystore.getString("test.setting.key").toString(), is("test.setting.value")); - InputStream is = keystore.getFile("test.setting.file"); - byte[] bytes = new byte[is.available()]; - assertThat(is.read(bytes), greaterThan(0)); - String actual = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(bytes)).toString(); - String expected = "{\"some_key\": \"some_val\"}"; - assertThat(actual, is(expected)); - } - - private void assertJvmOptionsImported() throws IOException, GeneralSecurityException { - Path path = env.configDir().resolve("jvm.options.d"); - assertThat(Files.exists(path), is(true)); - assertThat(Files.isDirectory(path), is(true)); - assertThat(Files.exists(path.resolve("test.options")), is(true)); - } - - private void assertLog4jPropertiesImported() throws IOException, GeneralSecurityException { - assertThat(Files.exists(env.configDir().resolve("log4j2.properties")), is(true)); - } -} diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java deleted file mode 100644 index 91d708984deb4..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.tools.cli.upgrade; - -import org.opensearch.LegacyESVersion; -import org.opensearch.cli.MockTerminal; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.io.PathUtils; -import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Map; - -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -public class ValidateInputTaskTests extends OpenSearchTestCase { - - private ValidateInputTask task; - private MockTerminal terminal; - private Environment env; - - @Before - public void setTask() { - task = new ValidateInputTask(); - terminal = new MockTerminal(); - env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", "test_home").build()); - } - - public void testUnsupportedEsVersion() { - TaskInput taskInput = new TaskInput(env); - taskInput.setVersion(LegacyESVersion.fromId(7100199)); - - final RuntimeException e = expectThrows(RuntimeException.class, () -> task.accept(new Tuple<>(taskInput, terminal))); - - assertTrue(e.getMessage(), e.getMessage().contains("The installed version 7.10.1 of elasticsearch is not supported.")); - } - - public void testGetSummaryFields() { - TaskInput taskInput = new TaskInput(env); - taskInput.setEsConfig(PathUtils.get("es_home")); - taskInput.setCluster("some-cluster"); - taskInput.setNode("some-node"); - taskInput.setVersion(LegacyESVersion.fromId(7100299)); - taskInput.setBaseUrl("some-url"); - taskInput.setPlugins(Arrays.asList("plugin-1", "plugin-2")); - - Map summary = task.getSummaryFieldsMap(taskInput); - - assertThat(summary.entrySet(), hasSize(7)); - assertThat(summary.get("Cluster"), is("some-cluster")); - assertThat(summary.get("Node"), is("some-node")); - assertThat(summary.get("Endpoint"), is("some-url")); - assertThat(summary.get("Elasticsearch Version"), is("7.10.2")); - assertThat(summary.get("Elasticsearch Plugins"), is("[plugin-1, plugin-2]")); - assertThat(summary.get("Elasticsearch Config"), is("es_home")); - assertThat(summary.get("OpenSearch Config"), is(env.configDir().toString())); - } -} diff --git a/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.keystore b/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.keystore deleted file mode 100644 index 0d4d3468e5f1d..0000000000000 Binary files a/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.keystore and /dev/null differ diff --git a/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.yml b/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.yml deleted file mode 100644 index 083b4c7a7452b..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/resources/config/elasticsearch.yml +++ /dev/null @@ -1,8 +0,0 @@ -cluster.name: my-cluster -node.name: node-x -path: - data: - - /mnt/data_1 - - /mnt/data_2 - logs: /var/log/eslogs -http.port: 42123 diff --git a/distribution/tools/upgrade-cli/src/test/resources/config/jvm.options.d/test.options b/distribution/tools/upgrade-cli/src/test/resources/config/jvm.options.d/test.options deleted file mode 100644 index a5030caf5a2e2..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/resources/config/jvm.options.d/test.options +++ /dev/null @@ -1,2 +0,0 @@ --Xms2g --Xmx2g diff --git 
a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties b/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties deleted file mode 100644 index 4b92d3fc62376..0000000000000 --- a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties +++ /dev/null @@ -1,213 +0,0 @@ -status = error - -appender.console.type = Console -appender.console.name = console -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -######## Server JSON ############################ -appender.rolling.type = RollingFile -appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json -appender.rolling.layout.type = ESJsonLayout -appender.rolling.layout.type_name = server - -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz -appender.rolling.policies.type = Policies -appender.rolling.policies.time.type = TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval = 1 -appender.rolling.policies.time.modulate = true -appender.rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.rolling.policies.size.size = 128MB -appender.rolling.strategy.type = DefaultRolloverStrategy -appender.rolling.strategy.fileIndex = nomax -appender.rolling.strategy.action.type = Delete -appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} -appender.rolling.strategy.action.condition.type = IfFileName -appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* -appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize -appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB -################################################ -######## Server - old style pattern ########### -appender.rolling_old.type = RollingFile -appender.rolling_old.name = rolling_old -appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log -appender.rolling_old.layout.type = PatternLayout -appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz -appender.rolling_old.policies.type = Policies -appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy -appender.rolling_old.policies.time.interval = 1 -appender.rolling_old.policies.time.modulate = true -appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.rolling_old.policies.size.size = 128MB -appender.rolling_old.strategy.type = DefaultRolloverStrategy -appender.rolling_old.strategy.fileIndex = nomax -appender.rolling_old.strategy.action.type = Delete -appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} -appender.rolling_old.strategy.action.condition.type = IfFileName -appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* -appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize -appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB -################################################ - -rootLogger.level = info -rootLogger.appenderRef.console.ref = console -rootLogger.appenderRef.rolling.ref = rolling -rootLogger.appenderRef.rolling_old.ref = rolling_old - -######## 
Deprecation JSON ####################### -appender.deprecation_rolling.type = RollingFile -appender.deprecation_rolling.name = deprecation_rolling -appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json -appender.deprecation_rolling.layout.type = ESJsonLayout -appender.deprecation_rolling.layout.type_name = deprecation -appender.deprecation_rolling.layout.esmessagefields=x-opaque-id -appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter - -appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz -appender.deprecation_rolling.policies.type = Policies -appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.deprecation_rolling.policies.size.size = 1GB -appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy -appender.deprecation_rolling.strategy.max = 4 - -appender.header_warning.type = HeaderWarningAppender -appender.header_warning.name = header_warning -################################################# -######## Deprecation - old style pattern ####### -appender.deprecation_rolling_old.type = RollingFile -appender.deprecation_rolling_old.name = deprecation_rolling_old -appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log -appender.deprecation_rolling_old.layout.type = PatternLayout -appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _deprecation-%i.log.gz -appender.deprecation_rolling_old.policies.type = Policies -appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.deprecation_rolling_old.policies.size.size = 1GB -appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy -appender.deprecation_rolling_old.strategy.max = 4 -################################################# -logger.deprecation.name = org.elasticsearch.deprecation -logger.deprecation.level = deprecation -logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling -logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old -logger.deprecation.appenderRef.header_warning.ref = header_warning -logger.deprecation.additivity = false - -######## Search slowlog JSON #################### -appender.index_search_slowlog_rolling.type = RollingFile -appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling -appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ - .cluster_name}_index_search_slowlog.json -appender.index_search_slowlog_rolling.layout.type = ESJsonLayout -appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog -appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id - -appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ - .cluster_name}_index_search_slowlog-%i.json.gz -appender.index_search_slowlog_rolling.policies.type = Policies -appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.index_search_slowlog_rolling.policies.size.size = 1GB 
-appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy -appender.index_search_slowlog_rolling.strategy.max = 4 -################################################# -######## Search slowlog - old style pattern #### -appender.index_search_slowlog_rolling_old.type = RollingFile -appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old -appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_search_slowlog.log -appender.index_search_slowlog_rolling_old.layout.type = PatternLayout -appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_search_slowlog-%i.log.gz -appender.index_search_slowlog_rolling_old.policies.type = Policies -appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.index_search_slowlog_rolling_old.policies.size.size = 1GB -appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy -appender.index_search_slowlog_rolling_old.strategy.max = 4 -################################################# -logger.index_search_slowlog_rolling.name = index.search.slowlog -logger.index_search_slowlog_rolling.level = trace -logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling -logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old -logger.index_search_slowlog_rolling.additivity = false - -######## Indexing slowlog JSON ################## -appender.index_indexing_slowlog_rolling.type = RollingFile -appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling -appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog.json -appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout -appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog -appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source - -appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog-%i.json.gz -appender.index_indexing_slowlog_rolling.policies.type = Policies -appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.index_indexing_slowlog_rolling.policies.size.size = 1GB -appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy -appender.index_indexing_slowlog_rolling.strategy.max = 4 -################################################# -######## Indexing slowlog - old style pattern ## -appender.index_indexing_slowlog_rolling_old.type = RollingFile -appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old -appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog.log -appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout -appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.index_indexing_slowlog_rolling_old.filePattern = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog-%i.log.gz -appender.index_indexing_slowlog_rolling_old.policies.type = Policies -appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB -appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy -appender.index_indexing_slowlog_rolling_old.strategy.max = 4 -################################################# - -logger.index_indexing_slowlog.name = index.indexing.slowlog.index -logger.index_indexing_slowlog.level = trace -logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling -logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old -logger.index_indexing_slowlog.additivity = false - -######## Task details log JSON #################### -appender.task_detailslog_rolling.type = RollingFile -appender.task_detailslog_rolling.name = task_detailslog_rolling -appender.task_detailslog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.json -appender.task_detailslog_rolling.layout.type = ESJsonLayout -appender.task_detailslog_rolling.layout.type_name = task_detailslog -appender.task_detailslog_rolling.layout.esmessagefields=taskId,type,action,description,start_time_millis,resource_stats,metadata - -appender.task_detailslog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.json.gz -appender.task_detailslog_rolling.policies.type = Policies -appender.task_detailslog_rolling.policies.size.type = SizeBasedTriggeringPolicy -appender.task_detailslog_rolling.policies.size.size = 1GB -appender.task_detailslog_rolling.strategy.type = DefaultRolloverStrategy -appender.task_detailslog_rolling.strategy.max = 4 -################################################# -######## Task details log - old style pattern #### -appender.task_detailslog_rolling_old.type = RollingFile -appender.task_detailslog_rolling_old.name = task_detailslog_rolling_old -appender.task_detailslog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.log -appender.task_detailslog_rolling_old.layout.type = PatternLayout -appender.task_detailslog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.task_detailslog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.log.gz -appender.task_detailslog_rolling_old.policies.type = Policies -appender.task_detailslog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.task_detailslog_rolling_old.policies.size.size = 1GB -appender.task_detailslog_rolling_old.strategy.type = DefaultRolloverStrategy -appender.task_detailslog_rolling_old.strategy.max = 4 -################################################# -logger.task_detailslog_rolling.name = task.detailslog -logger.task_detailslog_rolling.level = trace -logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling -logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling_old.ref = task_detailslog_rolling_old -logger.task_detailslog_rolling.additivity = false diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index d8bf39525abfe..fe0716b4b62f6 100644 --- 
a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -62,11 +62,12 @@ hadoop3 = "3.3.6" # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli bouncycastle_jce = "2.0.0" -bouncycastle_tls = "2.0.19" -bouncycastle_pkix = "2.0.7" -bouncycastle_pg = "2.0.10" +bouncycastle_tls = "2.0.20" +bouncycastle_pkix = "2.0.8" +bouncycastle_pg = "2.0.11" bouncycastle_util = "2.0.3" -password4j = "1.8.2" +password4j = "1.8.3" + # test dependencies randomizedrunner = "2.7.1" junit = "4.13.2" diff --git a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java index f52f1bc91f559..455be2a83f840 100644 --- a/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java +++ b/libs/agent-sm/agent/src/main/java/org/opensearch/javaagent/FileInterceptor.java @@ -74,8 +74,23 @@ public static void intercept(@Advice.AllArguments Object[] args, @Advice.Origin final boolean isDelete = isMutating == false ? name.startsWith("delete") : false; // This is Windows implementation of UNIX Domain Sockets (close) - if (isDelete == true - && walker.getCallerClass().getName().equalsIgnoreCase("sun.nio.ch.PipeImpl$Initializer$LoopbackConnector") == true) { + boolean isUnixSocketCaller = false; + if (isDelete == true) { + final Collection> chain = walker.walk(StackCallerClassChainExtractor.INSTANCE); + + if (walker.getCallerClass().getName().equalsIgnoreCase("sun.nio.ch.PipeImpl$Initializer$LoopbackConnector") == true) { + isUnixSocketCaller = true; + } else { + for (final Class caller : chain) { + if (caller.getName().equalsIgnoreCase("sun.nio.ch.PipeImpl$Initializer$LoopbackConnector")) { + isUnixSocketCaller = true; + break; + } + } + } + } + + if (isDelete == true && isUnixSocketCaller == true) { final NetPermission permission = new NetPermission("accessUnixDomainSocket"); for (ProtectionDomain domain : callers) { if (!policy.implies(domain, permission)) { diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java index da8c06c07d8e5..94ec876178ea5 100644 --- a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -16,11 +16,15 @@ import org.opensearch.semver.expr.Caret; import org.opensearch.semver.expr.Equal; import org.opensearch.semver.expr.Expression; +import org.opensearch.semver.expr.Range; import org.opensearch.semver.expr.Tilde; import java.io.IOException; +import java.util.Locale; import java.util.Objects; import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static java.util.Arrays.stream; @@ -31,6 +35,7 @@ *

 *     <li>'=' Requires exact match with the range version. For example, =1.2.3 range would match only 1.2.3</li>
 *     <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li>
 *     <li>'^' Allows for patch and minor version variability starting from the range version. For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li>
+ *     <li>Explicit ranges: [2.0.0,3.0.0], (2.0.0,3.0.0), [2.0.0,3.0.0), (2.0.0,3.0.0]</li>
  • * * * @opensearch.api @@ -38,12 +43,16 @@ @PublicApi(since = "2.13.0") public class SemverRange implements ToXContentFragment { + private static final Pattern RANGE_PATTERN = Pattern.compile("([\\[\\(])([\\d.]+)\\s*,\\s*([\\d.]+)([\\]\\)])"); + private final Version rangeVersion; private final RangeOperator rangeOperator; + private final Expression expression; public SemverRange(final Version rangeVersion, final RangeOperator rangeOperator) { this.rangeVersion = rangeVersion; this.rangeOperator = rangeOperator; + this.expression = rangeOperator.expression; } /** @@ -52,6 +61,23 @@ public SemverRange(final Version rangeVersion, final RangeOperator rangeOperator * @return a {@code SemverRange} */ public static SemverRange fromString(final String range) { + // Check if it's a range expression + Matcher matcher = RANGE_PATTERN.matcher(range); + if (matcher.matches()) { + char leftBracket = matcher.group(1).charAt(0); + String lowerVersionStr = matcher.group(2); + String upperVersionStr = matcher.group(3); + char rightBracket = matcher.group(4).charAt(0); + + Version lowerVersion = Version.fromString(matcher.group(2)); + Version upperVersion = Version.fromString(matcher.group(3)); + boolean includeLower = leftBracket == '['; + boolean includeUpper = rightBracket == ']'; + + Range rangeExpression = new Range(lowerVersion, upperVersion, includeLower, includeUpper); + return new SemverRange(lowerVersion, RangeOperator.RANGE, rangeExpression); + } + RangeOperator rangeOperator = RangeOperator.fromRange(range); String version = range.replaceFirst(rangeOperator.asEscapedString(), ""); if (!Version.stringHasLength(version)) { @@ -60,6 +86,12 @@ public static SemverRange fromString(final String range) { return new SemverRange(Version.fromString(version), rangeOperator); } + public SemverRange(Version rangeVersion, RangeOperator operator, Expression customExpression) { + this.rangeVersion = rangeVersion; + this.rangeOperator = operator; + this.expression = customExpression; + } + /** * Return the range operator for this range. * @return range operator @@ -94,7 +126,7 @@ public boolean isSatisfiedBy(final String versionToEvaluate) { * @see #isSatisfiedBy(String) */ public boolean isSatisfiedBy(final Version versionToEvaluate) { - return this.rangeOperator.expression.evaluate(this.rangeVersion, versionToEvaluate); + return this.expression.evaluate(this.rangeVersion, versionToEvaluate); } @Override @@ -106,16 +138,29 @@ public boolean equals(@Nullable final Object o) { return false; } SemverRange range = (SemverRange) o; - return Objects.equals(rangeVersion, range.rangeVersion) && rangeOperator == range.rangeOperator; + return Objects.equals(rangeVersion, range.rangeVersion) + && rangeOperator == range.rangeOperator + && Objects.equals(expression, range.expression); } @Override public int hashCode() { - return Objects.hash(rangeVersion, rangeOperator); + return Objects.hash(rangeVersion, rangeOperator, expression); } @Override public String toString() { + if (rangeOperator == RangeOperator.RANGE && expression instanceof Range) { + Range range = (Range) expression; + return String.format( + Locale.ROOT, + "%s%s,%s%s", + range.isIncludeLower() ? "[" : "(", + range.getLowerBound(), + range.getUpperBound(), + range.isIncludeUpper() ? "]" : ")" + ); + } return rangeOperator.asString() + rangeVersion; } @@ -128,10 +173,10 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa * A range operator. 
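For orientation, a minimal usage sketch of the explicit-range support added above; it assumes only the behavior exercised by the new tests (fromString parsing, isSatisfiedBy, and the bracket-preserving toString):

    import org.opensearch.semver.SemverRange;

    class ExplicitRangeSketch {
        public static void main(String[] args) {
            // Half-open range: lower bound inclusive, upper bound exclusive.
            SemverRange range = SemverRange.fromString("[2.0.0,3.0.0)");

            System.out.println(range.isSatisfiedBy("2.0.0")); // true  - lower bound included
            System.out.println(range.isSatisfiedBy("2.5.0")); // true  - inside the range
            System.out.println(range.isSatisfiedBy("3.0.0")); // false - upper bound excluded

            // toString() reproduces the original bracket notation.
            System.out.println(range);                        // [2.0.0,3.0.0)
        }
    }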
*/ public enum RangeOperator { - EQ("=", new Equal()), TILDE("~", new Tilde()), CARET("^", new Caret()), + RANGE("range", new Range()), DEFAULT("", new Equal()); private final String operator; diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Range.java b/libs/core/src/main/java/org/opensearch/semver/expr/Range.java new file mode 100644 index 0000000000000..5fcc352a8b2ad --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Range.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +import java.util.Objects; + +/** + * Expression to evaluate version compatibility within a specified range with configurable bounds. + */ +public class Range implements Expression { + private final Version lowerBound; + private final Version upperBound; + private final boolean includeLower; + private final boolean includeUpper; + + public Range() { + this.lowerBound = Version.fromString("0.0.0"); // Minimum version + this.upperBound = Version.fromString("999.999.999"); // Maximum version + this.includeLower = true; // Default to inclusive bounds + this.includeUpper = true; + } + + public Range(Version lowerBound, Version upperBound, boolean includeLower, boolean includeUpper) { + if (lowerBound == null) { + throw new IllegalArgumentException("Lower bound cannot be null"); + } + if (upperBound == null) { + throw new IllegalArgumentException("Upper bound cannot be null"); + } + if (lowerBound.after(upperBound)) { + throw new IllegalArgumentException("Lower bound must be less than or equal to upper bound"); + } + this.lowerBound = lowerBound; + this.upperBound = upperBound; + this.includeLower = includeLower; + this.includeUpper = includeUpper; + } + + public void updateRange(Range other) { + if (other == null) { + throw new IllegalArgumentException("Range cannot be null"); + } + } + + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + + boolean satisfiesLower = includeLower ? versionToEvaluate.onOrAfter(lowerBound) : versionToEvaluate.after(lowerBound); + + boolean satisfiesUpper = includeUpper ? 
versionToEvaluate.onOrBefore(upperBound) : versionToEvaluate.before(upperBound); + + return satisfiesLower && satisfiesUpper; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Range range = (Range) o; + return includeLower == range.includeLower + && includeUpper == range.includeUpper + && Objects.equals(lowerBound, range.lowerBound) + && Objects.equals(upperBound, range.upperBound); + } + + @Override + public int hashCode() { + return Objects.hash(lowerBound, upperBound, includeLower, includeUpper); + } + + public boolean isIncludeLower() { + return includeLower; + } + + public boolean isIncludeUpper() { + return includeUpper; + } + + public Version getLowerBound() { + return lowerBound; + } + + public Version getUpperBound() { + return upperBound; + } + +} diff --git a/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java index af1d95b2561b7..ba624d5287980 100644 --- a/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java +++ b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java @@ -101,5 +101,204 @@ public void testInvalidRanges() { assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); expectThrows(NumberFormatException.class, () -> SemverRange.fromString("$1.2.3")); + + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[2.3.0]")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[2.3.0,]")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[,2.7.0]")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("2.3.0,2.7.0")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[2.7.0,2.3.0]")); + } + + public void testInclusiveRange() { + SemverRange range = SemverRange.fromString("[2.3.0,2.7.0]"); + + // Test lower bound + assertTrue("Should include lower bound", range.isSatisfiedBy("2.3.0")); + + // Test upper bound + assertTrue("Should include upper bound", range.isSatisfiedBy("2.7.0")); + + // Test middle values + assertTrue("Should include values in range", range.isSatisfiedBy("2.5.0")); + assertTrue("Should include patch versions", range.isSatisfiedBy("2.4.1")); + + // Test out of range values + assertFalse("Should exclude values below range", range.isSatisfiedBy("2.2.9")); + assertFalse("Should exclude values above range", range.isSatisfiedBy("2.7.1")); } + + public void testExclusiveRange() { + SemverRange range = SemverRange.fromString("(2.3.0,2.7.0)"); + + // Test bounds + assertFalse("Should exclude lower bound", range.isSatisfiedBy("2.3.0")); + assertFalse("Should exclude upper bound", range.isSatisfiedBy("2.7.0")); + + // Test middle values + assertTrue("Should include values in range", range.isSatisfiedBy("2.5.0")); + assertTrue("Should include values near lower bound", range.isSatisfiedBy("2.3.1")); + assertTrue("Should include values near upper bound", range.isSatisfiedBy("2.6.9")); + + // Test out of range values + assertFalse("Should exclude values below range", range.isSatisfiedBy("2.2.9")); + assertFalse("Should exclude values above range", range.isSatisfiedBy("2.7.1")); + } + + public void testMixedRanges() { + // Test inclusive lower bound, exclusive upper bound + SemverRange range1 = SemverRange.fromString("[2.3.0,2.7.0)"); + assertTrue("Should include lower bound", 
range1.isSatisfiedBy("2.3.0")); + assertFalse("Should exclude upper bound", range1.isSatisfiedBy("2.7.0")); + assertTrue("Should include values in range", range1.isSatisfiedBy("2.6.9")); + + // Test exclusive lower bound, inclusive upper bound + SemverRange range2 = SemverRange.fromString("(2.3.0,2.7.0]"); + assertFalse("Should exclude lower bound", range2.isSatisfiedBy("2.3.0")); + assertTrue("Should include upper bound", range2.isSatisfiedBy("2.7.0")); + assertTrue("Should include values in range", range2.isSatisfiedBy("2.3.1")); + } + + public void testInvalidRangeConstructions() { + // Test invalid range constructions + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> SemverRange.fromString("[2.0.0,1.0.0]") // Lower bound greater than upper bound + ); + assertTrue(ex.getMessage().contains("Lower bound must be less than or equal to upper bound")); + + // Test malformed ranges + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[2.0.0]")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[2.0.0,]")); + assertThrows(IllegalArgumentException.class, () -> SemverRange.fromString("[,2.0.0]")); + } + + public void testEqualsAndHashCode() { + // Test inclusive ranges [a,b] + SemverRange inclusiveRange1 = SemverRange.fromString("[1.0.0,2.0.0]"); + SemverRange inclusiveRange2 = SemverRange.fromString("[1.0.0,2.0.0]"); + + assertTrue("Same inclusive range should be equal to itself", inclusiveRange1.equals(inclusiveRange1)); + assertTrue("Identical inclusive ranges should be equal", inclusiveRange1.equals(inclusiveRange2)); + assertEquals("Equal inclusive ranges should have same hashcode", inclusiveRange1.hashCode(), inclusiveRange2.hashCode()); + + // Test exclusive ranges (a,b) + SemverRange exclusiveRange1 = SemverRange.fromString("(1.0.0,2.0.0)"); + SemverRange exclusiveRange2 = SemverRange.fromString("(1.0.0,2.0.0)"); + + assertTrue("Same exclusive range should be equal to itself", exclusiveRange1.equals(exclusiveRange1)); + assertTrue("Identical exclusive ranges should be equal", exclusiveRange1.equals(exclusiveRange2)); + assertEquals("Equal exclusive ranges should have same hashcode", exclusiveRange1.hashCode(), exclusiveRange2.hashCode()); + + // Test mixed ranges [a,b) and (a,b] + SemverRange leftInclusiveRange1 = SemverRange.fromString("[1.0.0,2.0.0)"); + SemverRange leftInclusiveRange2 = SemverRange.fromString("[1.0.0,2.0.0)"); + SemverRange rightInclusiveRange1 = SemverRange.fromString("(1.0.0,2.0.0]"); + SemverRange rightInclusiveRange2 = SemverRange.fromString("(1.0.0,2.0.0]"); + + assertTrue("Same left-inclusive ranges should be equal", leftInclusiveRange1.equals(leftInclusiveRange2)); + assertTrue("Same right-inclusive ranges should be equal", rightInclusiveRange1.equals(rightInclusiveRange2)); + assertEquals( + "Equal left-inclusive ranges should have same hashcode", + leftInclusiveRange1.hashCode(), + leftInclusiveRange2.hashCode() + ); + assertEquals( + "Equal right-inclusive ranges should have same hashcode", + rightInclusiveRange1.hashCode(), + rightInclusiveRange2.hashCode() + ); + + // Test inequality between different range types + assertFalse("Inclusive range should not equal exclusive range", inclusiveRange1.equals(exclusiveRange1)); + assertFalse("Inclusive range should not equal left-inclusive range", inclusiveRange1.equals(leftInclusiveRange1)); + assertFalse("Inclusive range should not equal right-inclusive range", inclusiveRange1.equals(rightInclusiveRange1)); + 
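One reason the equals/hashCode change above pulls in the expression field: every bracket range is parsed to RangeOperator.RANGE with the lower bound stored as rangeVersion, so two ranges that differ only in bound inclusivity would otherwise compare equal. A small sketch of that case:

    import org.opensearch.semver.SemverRange;

    class RangeEqualitySketch {
        public static void main(String[] args) {
            SemverRange closed = SemverRange.fromString("[1.0.0,2.0.0]"); // operator RANGE, rangeVersion 1.0.0
            SemverRange open = SemverRange.fromString("(1.0.0,2.0.0)");   // operator RANGE, rangeVersion 1.0.0
            // The bounds' inclusivity lives only in the Range expression,
            // so comparing the expression is what distinguishes these two ranges.
            System.out.println(closed.equals(open)); // false
        }
    }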
assertFalse("Exclusive range should not equal left-inclusive range", exclusiveRange1.equals(leftInclusiveRange1)); + assertFalse("Left-inclusive range should not equal right-inclusive range", leftInclusiveRange1.equals(rightInclusiveRange1)); + + // Test different version ranges + SemverRange differentRange = SemverRange.fromString("[1.0.0,3.0.0]"); + assertFalse("Ranges with different versions should not be equal", inclusiveRange1.equals(differentRange)); + + // Test null and different types + assertFalse("Range should not equal null", inclusiveRange1.equals(null)); + assertFalse("Range should not equal different type", inclusiveRange1.equals("not a range")); + } + + public void testRangeOperatorExpression() { + // Test that expressions are properly assigned for different operators + SemverRange eqRange = SemverRange.fromString("=2.0.0"); + SemverRange tildeRange = SemverRange.fromString("~2.0.0"); + SemverRange caretRange = SemverRange.fromString("^2.0.0"); + + assertEquals(SemverRange.RangeOperator.EQ, eqRange.getRangeOperator()); + assertEquals(SemverRange.RangeOperator.TILDE, tildeRange.getRangeOperator()); + assertEquals(SemverRange.RangeOperator.CARET, caretRange.getRangeOperator()); + } + + public void testRangeToStringConsistency() { + // Test that toString produces consistent output + String[] testRanges = { "[2.0.0,3.0.0]", "(2.0.0,3.0.0)", "[2.0.0,3.0.0)", "(2.0.0,3.0.0]", "^2.0.0", "~2.0.0", "=2.0.0" }; + + for (String rangeStr : testRanges) { + SemverRange range = SemverRange.fromString(rangeStr); + assertEquals("toString should match original string", rangeStr, range.toString()); + } + } + + public void testRangeBoundaryConditions() { + // Test edge cases for range boundaries + SemverRange range = SemverRange.fromString("[2.0.0,3.0.0]"); + + // Test exact boundaries + assertTrue("Should include lower bound", range.isSatisfiedBy("2.0.0")); + assertTrue("Should include upper bound", range.isSatisfiedBy("3.0.0")); + + // Test just outside boundaries + assertFalse("Should exclude version before lower bound", range.isSatisfiedBy("1.9.999")); + assertFalse("Should exclude version after upper bound", range.isSatisfiedBy("3.0.1")); + } + + public void testRangeEvaluationConsistency() { + // Test that range evaluation is consistent + SemverRange range = SemverRange.fromString("[2.0.0,3.0.0]"); + + // Test same version evaluation + boolean result1 = range.isSatisfiedBy("2.5.0"); + boolean result2 = range.isSatisfiedBy("2.5.0"); + assertEquals("Same version should evaluate consistently", result1, result2); + + } + + public void testRangeToString() { + // Test that toString produces the same string that was parsed + String[] ranges = { "[2.3.0,2.7.0]", "(2.3.0,2.7.0)", "[2.3.0,2.7.0)", "(2.3.0,2.7.0]" }; + + for (String rangeStr : ranges) { + SemverRange range = SemverRange.fromString(rangeStr); + assertEquals("toString should match original string", rangeStr, range.toString()); + } + } + + public void testRangeEquality() { + SemverRange range1 = SemverRange.fromString("[2.3.0,2.7.0]"); + SemverRange range2 = SemverRange.fromString("[2.3.0,2.7.0]"); + SemverRange range3 = SemverRange.fromString("(2.3.0,2.7.0]"); + + assertEquals("Identical ranges should be equal", range1, range2); + assertNotEquals("Different ranges should not be equal", range1, range3); + assertNotEquals("Range should not equal null", null, range1); + } + + public void testVersionEdgeCases() { + SemverRange range = SemverRange.fromString("[2.0.0,3.0.0]"); + + // Test major version boundaries + 
assertTrue(range.isSatisfiedBy("2.0.0")); + assertTrue(range.isSatisfiedBy("2.99.99")); + assertTrue(range.isSatisfiedBy("3.0.0")); + assertFalse(range.isSatisfiedBy("1.99.99")); + assertFalse(range.isSatisfiedBy("3.0.1")); + + } + } diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/RangeTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/RangeTests.java new file mode 100644 index 0000000000000..a9e953ef5b270 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/RangeTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class RangeTests extends OpenSearchTestCase { + + public void testDefaultConstructor() { + Range range = new Range(); + assertEquals(Version.fromString("0.0.0"), range.getLowerBound()); + assertEquals(Version.fromString("999.999.999"), range.getUpperBound()); + assertTrue(range.isIncludeLower()); + assertTrue(range.isIncludeUpper()); + + } + + public void testRangeEvaluation() { + Version lowerBound = Version.fromString("2.3.0"); + Version upperBound = Version.fromString("2.7.0"); + + // Test inclusive range + Range inclusiveRange = new Range(lowerBound, upperBound, true, true); + assertTrue(inclusiveRange.evaluate(null, Version.fromString("2.3.0"))); + assertTrue(inclusiveRange.evaluate(null, Version.fromString("2.5.0"))); + assertTrue(inclusiveRange.evaluate(null, Version.fromString("2.7.0"))); + assertFalse(inclusiveRange.evaluate(null, Version.fromString("2.2.9"))); + assertFalse(inclusiveRange.evaluate(null, Version.fromString("2.7.1"))); + + // Test exclusive range + Range exclusiveRange = new Range(lowerBound, upperBound, false, false); + assertFalse(exclusiveRange.evaluate(null, Version.fromString("2.3.0"))); + assertTrue(exclusiveRange.evaluate(null, Version.fromString("2.5.0"))); + assertFalse(exclusiveRange.evaluate(null, Version.fromString("2.7.0"))); + assertFalse(exclusiveRange.evaluate(null, Version.fromString("2.2.9"))); + assertFalse(exclusiveRange.evaluate(null, Version.fromString("2.7.1"))); + } + + public void testInvalidRanges() { + Version lowerBound = Version.fromString("2.3.0"); + Version upperBound = Version.fromString("2.7.0"); + + // Test null bounds + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Range(null, upperBound, true, true)); + assertEquals("Lower bound cannot be null", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> new Range(lowerBound, null, true, true)); + assertEquals("Upper bound cannot be null", ex.getMessage()); + + // Test invalid range (upper < lower) + ex = expectThrows(IllegalArgumentException.class, () -> new Range(upperBound, lowerBound, true, true)); + assertEquals("Lower bound must be less than or equal to upper bound", ex.getMessage()); + } + + public void testMixedBoundTypes() { + Version lowerBound = Version.fromString("2.3.0"); + Version upperBound = Version.fromString("2.7.0"); + + // Test inclusive lower, exclusive upper + Range mixedRange1 = new Range(lowerBound, upperBound, true, false); + assertTrue(mixedRange1.evaluate(null, Version.fromString("2.3.0"))); + assertTrue(mixedRange1.evaluate(null, Version.fromString("2.6.9"))); + assertFalse(mixedRange1.evaluate(null, 
Version.fromString("2.7.0"))); + + // Test exclusive lower, inclusive upper + Range mixedRange2 = new Range(lowerBound, upperBound, false, true); + assertFalse(mixedRange2.evaluate(null, Version.fromString("2.3.0"))); + assertTrue(mixedRange2.evaluate(null, Version.fromString("2.3.1"))); + assertTrue(mixedRange2.evaluate(null, Version.fromString("2.7.0"))); + } + + public void testRangeAccessors() { + Version lowerBound = Version.fromString("2.3.0"); + Version upperBound = Version.fromString("2.7.0"); + Range range = new Range(lowerBound, upperBound, true, false); + + assertEquals("Lower bound should match", lowerBound, range.getLowerBound()); + assertEquals("Upper bound should match", upperBound, range.getUpperBound()); + assertTrue("Should be inclusive lower", range.isIncludeLower()); + assertFalse("Should be exclusive upper", range.isIncludeUpper()); + } + + public void testUpdateRangeNullCheck() { + Range range = new Range( + Version.fromString("1.0.0"), + Version.fromString("2.0.0"), + true, // includeLower + true // includeUpper + ); + + // Test that updateRange throws IllegalArgumentException for null input + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> range.updateRange(null)); + assertEquals("Range cannot be null", ex.getMessage()); + } + +} diff --git a/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 b/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 deleted file mode 100644 index 5df930b54fe44..0000000000000 --- a/libs/ssl-config/licenses/bcpkix-fips-2.0.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01eea0f325315ca6295b0a6926ff862d8001cdf9 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bcpkix-fips-2.0.8.jar.sha1 b/libs/ssl-config/licenses/bcpkix-fips-2.0.8.jar.sha1 new file mode 100644 index 0000000000000..69293a600d472 --- /dev/null +++ b/libs/ssl-config/licenses/bcpkix-fips-2.0.8.jar.sha1 @@ -0,0 +1 @@ +aad7b0fcf55892e7ff7e2d23a290f143f4bb56e0 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 b/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 deleted file mode 100644 index 387635e9e1594..0000000000000 --- a/libs/ssl-config/licenses/bctls-fips-2.0.19.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cc33650ede63bc1a8281ed5c8e1da314d50bc76 \ No newline at end of file diff --git a/libs/ssl-config/licenses/bctls-fips-2.0.20.jar.sha1 b/libs/ssl-config/licenses/bctls-fips-2.0.20.jar.sha1 new file mode 100644 index 0000000000000..66cd82b49b537 --- /dev/null +++ b/libs/ssl-config/licenses/bctls-fips-2.0.20.jar.sha1 @@ -0,0 +1 @@ +1138f7896e0d1bb0d924bc868ed2dfda4f69470e \ No newline at end of file diff --git a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/RuleQueryMapper.java b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/RuleQueryMapper.java index f0ee052790b4b..cb116b7626dd2 100644 --- a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/RuleQueryMapper.java +++ b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/RuleQueryMapper.java @@ -24,4 +24,11 @@ public interface RuleQueryMapper { * @return */ T from(GetRuleRequest request); + + /** + * Returns the cardinality query for rules. The query should be constructed + * so that it can be used to count the total number of stored rules. + * @return the cardinality query + */ + T getCardinalityQuery(); } diff --git a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/autotagging/Rule.java
b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/autotagging/Rule.java index ae1cebdda99d5..5907b9aebecc9 100644 --- a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/autotagging/Rule.java +++ b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/autotagging/Rule.java @@ -31,7 +31,7 @@ * of a rule. The indexed view may differ in representation. * { * "id": "fwehf8302582mglfio349==", - * "description": "Assign Query Group for Index Logs123" + * "description": "Assign Workload Group for Index Logs123" * "index_pattern": ["logs123"], * "workload_group": "dev_workload_group_id", * "updated_at": "01-10-2025T21:23:21.456Z" diff --git a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/service/IndexStoredRulePersistenceService.java b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/service/IndexStoredRulePersistenceService.java index d7a6f396bed74..73e68afd48f0e 100644 --- a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/service/IndexStoredRulePersistenceService.java +++ b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/service/IndexStoredRulePersistenceService.java @@ -19,9 +19,11 @@ import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Setting; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.query.QueryBuilder; @@ -53,8 +55,28 @@ */ public class IndexStoredRulePersistenceService implements RulePersistenceService { /** - * The system index name used for storing rules + * Default value for max rules count */ + public static final int DEFAULT_MAX_ALLOWED_RULE_COUNT = 200; + + /** + * max wlm rules setting name + */ + public static final String MAX_RULES_COUNT_SETTING_NAME = "wlm.autotagging.max_rules"; + + /** + * max wlm rules setting + */ + public static final Setting MAX_WLM_RULES_SETTING = Setting.intSetting( + MAX_RULES_COUNT_SETTING_NAME, + DEFAULT_MAX_ALLOWED_RULE_COUNT, + 10, + 500, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private int maxAllowedRulesCount; private final String indexName; private final Client client; private final ClusterService clusterService; @@ -87,6 +109,12 @@ public IndexStoredRulePersistenceService( this.maxRulesPerPage = maxRulesPerPage; this.parser = parser; this.queryBuilder = queryBuilder; + this.maxAllowedRulesCount = MAX_WLM_RULES_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_WLM_RULES_SETTING, this::setMaxAllowedRules); + } + + private void setMaxAllowedRules(int maxAllowedRules) { + this.maxAllowedRulesCount = maxAllowedRules; } /** @@ -101,12 +129,27 @@ public void createRule(CreateRuleRequest request, ActionListener persistRule(rule, listener), listener::onFailure)); } } } + private void performCardinalityCheck(ActionListener listener) { + SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(queryBuilder.getCardinalityQuery()).get(); + if (searchResponse.getHits().getTotalHits() != null && 
searchResponse.getHits().getTotalHits().value() >= maxAllowedRulesCount) { + listener.onFailure( + new OpenSearchRejectedExecutionException( + "This create operation will violate" + + " the cardinality limit of " + + maxAllowedRulesCount + + ". Please delete some stale or redundant rules first" + ) + ); + } + } + /** * Validates that no existing rule has the same attribute map as the given rule. * This validation must be performed one at a time to prevent writing duplicate rules. diff --git a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/storage/IndexBasedRuleQueryMapper.java b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/storage/IndexBasedRuleQueryMapper.java index 11b0ad5e564fb..5189c4b2ce48b 100644 --- a/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/storage/IndexBasedRuleQueryMapper.java +++ b/modules/autotagging-commons/common/src/main/java/org/opensearch/rule/storage/IndexBasedRuleQueryMapper.java @@ -53,4 +53,9 @@ public QueryBuilder from(GetRuleRequest request) { } return boolQuery; } + + @Override + public QueryBuilder getCardinalityQuery() { + return QueryBuilders.matchAllQuery(); + } } diff --git a/modules/autotagging-commons/common/src/test/java/org/opensearch/rule/service/IndexStoredRulePersistenceServiceTests.java b/modules/autotagging-commons/common/src/test/java/org/opensearch/rule/service/IndexStoredRulePersistenceServiceTests.java index e348d104cfe27..5d90dd3d0b7be 100644 --- a/modules/autotagging-commons/common/src/test/java/org/opensearch/rule/service/IndexStoredRulePersistenceServiceTests.java +++ b/modules/autotagging-commons/common/src/test/java/org/opensearch/rule/service/IndexStoredRulePersistenceServiceTests.java @@ -21,10 +21,12 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.query.QueryBuilder; @@ -50,6 +52,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -99,6 +102,11 @@ public void setUp() throws Exception { client = setUpMockClient(searchRequestBuilder); rule = mock(Rule.class); clusterService = mock(ClusterService.class); + Settings testSettings = Settings.EMPTY; + ClusterSettings clusterSettings = new ClusterSettings(testSettings, new HashSet<>()); + when(clusterService.getSettings()).thenReturn(testSettings); + clusterSettings.registerSetting(IndexStoredRulePersistenceService.MAX_WLM_RULES_SETTING); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); ClusterState clusterState = mock(ClusterState.class); Metadata metadata = mock(Metadata.class); when(clusterService.state()).thenReturn(clusterState); @@ -109,6 +117,7 @@ public void setUp() throws Exception { queryBuilder = mock(QueryBuilder.class); when(queryBuilder.filter(any())).thenReturn(queryBuilder); when(ruleQueryMapper.from(any(GetRuleRequest.class))).thenReturn(queryBuilder); +
when(ruleQueryMapper.getCardinalityQuery()).thenReturn(mock(QueryBuilder.class)); when(ruleEntityParser.parse(anyString())).thenReturn(rule); rulePersistenceService = new IndexStoredRulePersistenceService( @@ -144,6 +153,25 @@ public void testCreateRuleOnExistingIndex() throws Exception { assertNotNull(responseCaptor.getValue().getRule()); } + public void testCardinalityCheckBasedFailure() throws Exception { + CreateRuleRequest createRuleRequest = mock(CreateRuleRequest.class); + when(createRuleRequest.getRule()).thenReturn(rule); + when(rule.toXContent(any(), any())).thenAnswer(invocation -> invocation.getArgument(0)); + + SearchResponse searchResponse = mock(SearchResponse.class); + when(searchResponse.getHits()).thenReturn( + new SearchHits(new SearchHit[] {}, new TotalHits(10000, TotalHits.Relation.EQUAL_TO), 1.0f) + ); + when(searchRequestBuilder.get()).thenReturn(searchResponse); + + ActionListener listener = mock(ActionListener.class); + rulePersistenceService.createRule(createRuleRequest, listener); + + ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(OpenSearchRejectedExecutionException.class); + verify(listener).onFailure(exceptionCaptor.capture()); + assertNotNull(exceptionCaptor.getValue()); + } + public void testConcurrentCreateDuplicateRules() throws InterruptedException { ExecutorService singleThreadExecutor = Executors.newSingleThreadExecutor(); int threadCount = 10; diff --git a/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactory.java b/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactory.java index d23079a113d1b..64a863e94c94f 100644 --- a/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactory.java +++ b/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactory.java @@ -8,15 +8,20 @@ package org.opensearch.example.systemingestprocessor; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.AbstractBatchingSystemProcessor; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorPlugin.TRIGGER_SETTING; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_MAPPINGS; +import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_SETTINGS; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_MAPPINGS; +import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_SETTINGS; /** * A factory to create the example system ingest processor @@ -55,17 +60,26 @@ protected ExampleSystemIngestProcessorFactory() { @Override protected AbstractBatchingSystemProcessor newProcessor(String tag, String description, Map config) { final List> mappings = new ArrayList<>(); + final List settings = new ArrayList<>(); final Object mappingFromIndex = config.get(INDEX_MAPPINGS); final Object mappingFromTemplates = config.get(INDEX_TEMPLATE_MAPPINGS); + final Object settingsFromIndex = config.get(INDEX_SETTINGS); + final Object settingsFromTemplates = config.get(INDEX_TEMPLATE_SETTINGS); if (mappingFromTemplates instanceof List) { mappings.addAll((List>) 
mappingFromTemplates); } if (mappingFromIndex instanceof Map) { mappings.add((Map) mappingFromIndex); } + if (settingsFromTemplates instanceof List) { + settings.addAll((Collection) settingsFromTemplates); + } + if (settingsFromIndex instanceof Settings) { + settings.add((Settings) settingsFromIndex); + } // If no config we are not able to create a processor so simply return a null to show no processor created - if (mappings.isEmpty()) { + if (mappings.isEmpty() && settings.isEmpty()) { return null; } @@ -87,6 +101,15 @@ protected AbstractBatchingSystemProcessor newProcessor(String tag, String descri } } + + // If the trigger setting is configured, use it directly. + // When the index is created from v1 templates, multiple settings objects can apply and a later one can + // override an earlier one, so we need to loop through all of them. + for (final Settings setting : settings) { + if (setting.hasValue(TRIGGER_SETTING.getKey())) { + isTriggerFieldFound = TRIGGER_SETTING.get(setting); + } + } + + return isTriggerFieldFound ? new ExampleSystemIngestProcessor(tag, description, DEFAULT_BATCH_SIZE) : null; } diff --git a/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPlugin.java b/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPlugin.java index b1b5286b44b52..e370cbf8ae3e7 100644 --- a/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPlugin.java +++ b/plugins/examples/system-ingest-processor/src/main/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPlugin.java @@ -8,10 +8,12 @@ package org.opensearch.example.systemingestprocessor; +import org.opensearch.common.settings.Setting; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; +import java.util.List; import java.util.Map; /** * @@ -23,8 +25,23 @@ public class ExampleSystemIngestProcessorPlugin extends Plugin implements Ingest */ public ExampleSystemIngestProcessorPlugin() {} + /** + * A custom index setting that controls whether the example system ingest processor should be created.
+ */ + public static final Setting TRIGGER_SETTING = Setting.boolSetting( + "index.example_system_ingest_processor_plugin.trigger_setting", + false, + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + @Override public Map getSystemIngestProcessors(Processor.Parameters parameters) { return Map.of(ExampleSystemIngestProcessorFactory.TYPE, new ExampleSystemIngestProcessorFactory()); } + + @Override + public List> getSettings() { + return List.of(TRIGGER_SETTING); + } } diff --git a/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactoryTests.java b/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactoryTests.java index 2c46c12b1d9bf..54b998f261a8a 100644 --- a/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactoryTests.java +++ b/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorFactoryTests.java @@ -8,6 +8,7 @@ package org.opensearch.example.systemingestprocessor; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.AbstractBatchingSystemProcessor; import org.opensearch.test.OpenSearchTestCase; @@ -18,8 +19,11 @@ import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorFactory.DOC; import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorFactory.PROPERTIES; import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorFactory.TRIGGER_FIELD_NAME; +import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorPlugin.TRIGGER_SETTING; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_MAPPINGS; +import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_SETTINGS; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_MAPPINGS; +import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_SETTINGS; public class ExampleSystemIngestProcessorFactoryTests extends OpenSearchTestCase { public void testNewProcessor_whenWithTriggerField_thenReturnProcessor() { @@ -55,4 +59,37 @@ public void testNewProcessor_whenNoMapping_thenReturnNull() { assertNull("Should not create an example system ingest processor when the mapping is not found.", processor); } + + public void testNewProcessor_whenWithTriggerSettingFromIndex_thenReturnProcessor() { + final ExampleSystemIngestProcessorFactory factory = new ExampleSystemIngestProcessorFactory(); + Settings triggerEnabled = Settings.builder().put(TRIGGER_SETTING.getKey(), true).build(); + + AbstractBatchingSystemProcessor processor = factory.newProcessor("tag", "description", Map.of(INDEX_SETTINGS, triggerEnabled)); + + assertNotNull("Should create an example system ingest processor when the trigger_setting is true.", processor); + assertTrue(processor instanceof ExampleSystemIngestProcessor); + } + + public void testNewProcessor_whenWithTriggerSettingFromTemplate_thenReturnProcessor() { + final ExampleSystemIngestProcessorFactory factory = new ExampleSystemIngestProcessorFactory(); + Settings triggerEnabled = Settings.builder().put(TRIGGER_SETTING.getKey(), true).build(); + + AbstractBatchingSystemProcessor processor = factory.newProcessor( + "tag", + "description", + 
Map.of(INDEX_TEMPLATE_SETTINGS, List.of(triggerEnabled)) + ); + + assertNotNull("Should create an example system ingest processor when the trigger_setting is true.", processor); + assertTrue(processor instanceof ExampleSystemIngestProcessor); + } + + public void testNewProcessor_whenWithTriggerSettingDisabled_thenReturnProcessor() { + final ExampleSystemIngestProcessorFactory factory = new ExampleSystemIngestProcessorFactory(); + Settings triggerDisabled = Settings.builder().put(TRIGGER_SETTING.getKey(), false).build(); + + AbstractBatchingSystemProcessor processor = factory.newProcessor("tag", "description", Map.of(INDEX_SETTINGS, triggerDisabled)); + + assertNull("Should not create an example system ingest processor when the trigger_setting is false.", processor); + } } diff --git a/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPluginTests.java b/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPluginTests.java index afad3a5e9366b..207fb024ea9c8 100644 --- a/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPluginTests.java +++ b/plugins/examples/system-ingest-processor/src/test/java/org/opensearch/example/systemingestprocessor/ExampleSystemIngestProcessorPluginTests.java @@ -11,8 +11,10 @@ import org.opensearch.ingest.Processor; import org.opensearch.test.OpenSearchTestCase; +import java.util.List; import java.util.Map; +import static org.opensearch.example.systemingestprocessor.ExampleSystemIngestProcessorPlugin.TRIGGER_SETTING; import static org.mockito.Mockito.mock; public class ExampleSystemIngestProcessorPluginTests extends OpenSearchTestCase { @@ -27,4 +29,8 @@ public void testGetSystemIngestProcessors() { factories.get(ExampleSystemIngestProcessorFactory.TYPE) instanceof ExampleSystemIngestProcessorFactory ); } + + public void testGetSettings() { + assertEquals(List.of(TRIGGER_SETTING), plugin.getSettings()); + } } diff --git a/plugins/examples/system-ingest-processor/src/yamlRestTest/resources/rest-api-spec/test/example-system-ingest-processor/20_system_ingest_processor.yml b/plugins/examples/system-ingest-processor/src/yamlRestTest/resources/rest-api-spec/test/example-system-ingest-processor/20_system_ingest_processor.yml index 746e7b451c266..52ab654f03115 100644 --- a/plugins/examples/system-ingest-processor/src/yamlRestTest/resources/rest-api-spec/test/example-system-ingest-processor/20_system_ingest_processor.yml +++ b/plugins/examples/system-ingest-processor/src/yamlRestTest/resources/rest-api-spec/test/example-system-ingest-processor/20_system_ingest_processor.yml @@ -29,6 +29,28 @@ teardown: - match: _source.field_auto_added_by_system_ingest_processor: "This field is auto added by the example system ingest processor." +--- +"Processor injects a field on indexing a doc to existing index when trigger_setting is true": + - do: + indices.create: + index: test-index + body: + settings: + index.example_system_ingest_processor_plugin.trigger_setting: true + - do: + index: + index: test-index + id: 1 + body: + system_ingest_processor_trigger_field: "dummy value" + refresh: true + - do: + get: + index: test-index + id: 1 + - match: + _source.field_auto_added_by_system_ingest_processor: "This field is auto added by the example system ingest processor." 
+ --- "Processor should not inject a field on indexing a doc to existing index when trigger field is not defined in the index mapping": - do: @@ -56,6 +78,7 @@ teardown: - skip: features: allowed_warnings - do: + # test v1 template indices.put_template: name: example-template body: @@ -79,7 +102,41 @@ teardown: id: 1 - match: _source.field_auto_added_by_system_ingest_processor: "This field is auto added by the example system ingest processor." + - do: + indices.delete_template: + name: example-template +--- +"Processor injects field when index is created from matching template where trigger_setting is true": + - skip: + features: allowed_warnings + - do: + # test v2 template + indices.put_index_template: + name: example-template + body: + index_patterns: ["template-*"] + template: + settings: + index.example_system_ingest_processor_plugin.trigger_setting: true + - do: + allowed_warnings: + - "index [template-index-1] matches multiple legacy templates [example-template, global], composable templates will only match a single template" + index: + index: template-index-1 + id: 1 + body: + system_ingest_processor_trigger_field: "dummy value" + refresh: true + - do: + get: + index: template-index-1 + id: 1 + - match: + _source.field_auto_added_by_system_ingest_processor: "This field is auto added by the example system ingest processor." + - do: + indices.delete_index_template: + name: example-template --- "Processor injects field on bulk indexing to existing index": - do: @@ -156,7 +213,9 @@ teardown: id: 2 - match: _source.field_auto_added_by_system_ingest_processor: "This field is auto added by the example system ingest processor." - + - do: + indices.delete_template: + name: bulk-template --- "Processor injects field on bulk update, upsert on existing/new documents": # Temporarily disable system ingest pipelines to insert without triggering the system ingest field diff --git a/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 b/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 deleted file mode 100644 index bee14467d32a2..0000000000000 --- a/plugins/identity-shiro/licenses/password4j-1.8.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8ac106c667c0b081075e81a90dc92861b9bb66e \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/password4j-1.8.3.jar.sha1 b/plugins/identity-shiro/licenses/password4j-1.8.3.jar.sha1 new file mode 100644 index 0000000000000..fc1e6d2e1b2a8 --- /dev/null +++ b/plugins/identity-shiro/licenses/password4j-1.8.3.jar.sha1 @@ -0,0 +1 @@ +619cb40bd02455cf2c1a858bfaf5726d79acdf0c \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 5064a737619c1..9975e15ae3ff1 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,10 +44,10 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.55.3' + api 'com.azure:azure-core:1.55.5' api 'com.azure:azure-json:1.5.0' api 'com.azure:azure-xml:1.2.0' - api 'com.azure:azure-storage-common:12.29.0' + api 'com.azure:azure-storage-common:12.29.1' api 'com.azure:azure-core-http-netty:1.15.12' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" @@ -56,7 +56,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.30.0' + api 'com.azure:azure-storage-blob:12.30.1' api 
'com.azure:azure-identity:1.14.2' // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' diff --git a/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 deleted file mode 100644 index 966919b5c3c86..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.55.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -120adc6c3de019097b163390a7eb511f0acd050b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.55.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.55.5.jar.sha1 new file mode 100644 index 0000000000000..da66667656d04 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.55.5.jar.sha1 @@ -0,0 +1 @@ +93227034496e2a0dc0b7babcbba57f5a6bb8b4cb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 deleted file mode 100644 index 2f6fc7a879e5f..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.30.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a187bbdf04d9d4c0144ef619ba02ce1cd07211ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 new file mode 100644 index 0000000000000..34189c82a88ba --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.30.1.jar.sha1 @@ -0,0 +1 @@ +deaa55c7c985bec01cbbc4fef41d2da3d511dcbc \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.29.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.29.0.jar.sha1 deleted file mode 100644 index 6de0e8a945011..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.29.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -900fefe982179300c239fbe661e6135a760f5ee6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.29.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.29.1.jar.sha1 new file mode 100644 index 0000000000000..5bfb37b06f137 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.29.1.jar.sha1 @@ -0,0 +1 @@ +d4151d507125bfb255287bfde5d4ab27cd35e478 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index bd0257243bec1..c73fd2e57ee34 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -72,7 +72,7 @@ dependencies { api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.12.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.17.0' + api 'org.apache.commons:commons-lang3:3.18.0' implementation 'com.google.re2j:re2j:1.8' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 deleted file mode 100644 index 073922fda1dbe..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.17.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b17d2136f0460dcc0d2016ceefca8723bdf4ee70 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.18.0.jar.sha1 
b/plugins/repository-hdfs/licenses/commons-lang3-3.18.0.jar.sha1 new file mode 100644 index 0000000000000..a1a6598bd4f1b --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.18.0.jar.sha1 @@ -0,0 +1 @@ +fb14946f0e39748a6571de0635acbe44e7885491 \ No newline at end of file diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index 297a3052cc6d9..aabeb34b80ae5 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -160,7 +160,7 @@ public void testSimpleWorkflow() { public void testMissingUri() { try { - OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", Settings.builder()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo1", "hdfs", Settings.builder()); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -193,7 +193,7 @@ public void testNonHdfsUri() { public void testPathSpecifiedInHdfs() { try { Settings.Builder settings = Settings.builder().put("uri", "hdfs:///some/path"); - OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo2", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -204,7 +204,7 @@ public void testPathSpecifiedInHdfs() { public void testMissingPath() { try { Settings.Builder settings = Settings.builder().put("uri", "hdfs:///"); - OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo3", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 2e5db8116ee63..5553fdca716ff 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -25,7 +25,7 @@ dependencies { compileOnly "com.google.code.findbugs:jsr305:3.0.2" runtimeOnly "com.google.guava:guava:${versions.guava}" implementation "com.google.errorprone:error_prone_annotations:2.24.1" - implementation "com.google.guava:failureaccess:1.0.1" + implementation "com.google.guava:failureaccess:1.0.2" implementation "io.grpc:grpc-api:${versions.grpc}" implementation "io.grpc:grpc-core:${versions.grpc}" implementation "io.grpc:grpc-netty-shaded:${versions.grpc}" @@ -34,7 +34,7 @@ dependencies { implementation "io.grpc:grpc-services:${versions.grpc}" implementation "io.grpc:grpc-stub:${versions.grpc}" implementation "io.grpc:grpc-util:${versions.grpc}" - implementation "io.perfmark:perfmark-api:0.26.0" + implementation "io.perfmark:perfmark-api:0.27.0" implementation "org.opensearch:protobufs:0.3.0" testImplementation project(':test:framework') } diff --git a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 deleted file mode 100644 index 4798b37e20691..0000000000000 --- a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git 
a/plugins/transport-grpc/licenses/failureaccess-1.0.2.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.2.jar.sha1 new file mode 100644 index 0000000000000..43cb5aa469900 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-1.0.2.jar.sha1 @@ -0,0 +1 @@ +c4a06a64e650562f30b7bf9aaec1bfed43aca12b diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 deleted file mode 100644 index abf1becd13298..0000000000000 --- a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ef65452adaf20bf7d12ef55913aba24037b82738 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.27.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.27.0.jar.sha1 new file mode 100644 index 0000000000000..54651a33e5194 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-0.27.0.jar.sha1 @@ -0,0 +1 @@ +f86f575a41b091786a4b027cd9c0c1d2e3fc1c01 diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java index a886679ada293..3facd6305f176 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/plugin/transport/grpc/ssl/SecureNetty4GrpcServerTransport.java @@ -106,7 +106,7 @@ public String settingKey() { * @param provider for SSLContext and SecureAuxTransportParameters (ClientAuth and enabled ciphers). */ private JdkSslContext getSslContext(Settings settings, SecureAuxTransportSettingsProvider provider) throws SSLException { - Optional sslContext = provider.buildSecureAuxServerTransportContext(settings, this); + Optional sslContext = provider.buildSecureAuxServerTransportContext(settings, this.settingKey()); if (sslContext.isEmpty()) { try { sslContext = Optional.of(SSLContext.getDefault()); @@ -114,7 +114,8 @@ private JdkSslContext getSslContext(Settings settings, SecureAuxTransportSetting throw new SSLException("Failed to build default SSLContext for " + SecureNetty4GrpcServerTransport.class.getName(), e); } } - SecureAuxTransportSettingsProvider.SecureAuxTransportParameters params = provider.parameters().orElseGet(DefaultParameters::new); + SecureAuxTransportSettingsProvider.SecureAuxTransportParameters params = provider.parameters(settings, this.settingKey()) + .orElseGet(DefaultParameters::new); ClientAuth clientAuth = ClientAuth.valueOf(params.clientAuth().orElseThrow().toUpperCase(Locale.ROOT)); return new JdkSslContext( sslContext.get(), diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java index 387889eb87ae0..5cc65ee615a2a 100644 --- a/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java +++ b/plugins/transport-grpc/src/test/java/org/opensearch/plugin/transport/grpc/ssl/SecureSettingsHelpers.java @@ -10,7 +10,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.SecureAuxTransportSettingsProvider; -import org.opensearch.transport.AuxTransport; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; @@ -110,7 +109,7 @@ static 
SecureAuxTransportSettingsProvider getSecureSettingsProvider( ) { return new SecureAuxTransportSettingsProvider() { @Override - public Optional buildSecureAuxServerTransportContext(Settings settings, AuxTransport transport) + public Optional buildSecureAuxServerTransportContext(Settings settings, String auxTransportType) throws SSLException { // Choose a random protocol from among supported test defaults String protocol = randomFrom(DEFAULT_SSL_PROTOCOLS); @@ -126,7 +125,7 @@ public Optional buildSecureAuxServerTransportContext(Settings settin } @Override - public Optional parameters() { + public Optional parameters(Settings settings, String auxTransportType) { return Optional.of(new SecureAuxTransportParameters() { @Override public Optional clientAuth() { diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index da0aa229e6059..6ddc98ba9d22e 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -243,6 +243,7 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Excepti .runOn(sharedGroup.getLowLevelGroup()) .bindAddress(() -> socketAddress) .compress(true) + .http2Settings(spec -> spec.maxHeaderListSize(maxHeaderSize.bytesAsInt())) .httpRequestDecoder( spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt()) .h2cMaxContentLength(h2cMaxContentLength.bytesAsInt()) diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java index 7bdaef3ac2304..edd5bc97368dd 100644 --- a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -25,6 +25,7 @@ import java.io.UncheckedIOException; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; +import java.time.Duration; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -56,6 +57,8 @@ import reactor.netty.http.Http2SslContextSpec; import reactor.netty.http.HttpProtocol; import reactor.netty.http.client.HttpClient; +import reactor.netty.http.client.PrematureCloseException; +import reactor.util.retry.Retry; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -255,6 +258,7 @@ private FullHttpResponse sendRequestStream( ) ) ) + .retryWhen(Retry.backoff(3, Duration.ofSeconds(1)).filter(throwable -> throwable instanceof PrematureCloseException)) .blockLast(); } finally { @@ -283,7 +287,7 @@ private HttpClient createClient(final InetSocketAddress remoteAddress, final Nio .configure(s -> s.clientAuth(ClientAuth.NONE).trustManager(InsecureTrustManagerFactory.INSTANCE)) : Http2SslContextSpec.forClient() .configure(s -> s.clientAuth(ClientAuth.NONE).trustManager(InsecureTrustManagerFactory.INSTANCE)) - ) + ).handshakeTimeout(Duration.ofSeconds(30)) ); } else { return client.protocol( diff --git 
a/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java index 00d3e901588b1..a1ade144a7701 100644 --- a/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java +++ b/plugins/workload-management/src/javaRestTest/java/org/opensearch/rest/WorkloadManagementRestIT.java @@ -13,11 +13,18 @@ import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Before; import java.io.IOException; +import java.util.Locale; public class WorkloadManagementRestIT extends OpenSearchRestTestCase { + @Before + public void enableWlmMode() throws Exception { + setWlmMode("enabled"); + } + public void testCreate() throws Exception { Response response = performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics", "enforced", 0.4, 0.2)); assertEquals(response.getStatusLine().getStatusCode(), 200); @@ -129,6 +136,16 @@ public void testCRUD() throws Exception { performOperation("DELETE", "_wlm/workload_group/users3", null); } + public void testOperationWhenWlmDisabled() throws Exception { + setWlmMode("disabled"); + assertThrows( + ResponseException.class, + () -> performOperation("PUT", "_wlm/workload_group", getCreateJson("analytics", "enforced", 0.4, 0.2)) + ); + assertThrows(ResponseException.class, () -> performOperation("DELETE", "_wlm/workload_group/analytics4", null)); + assertOK(performOperation("GET", "_wlm/workload_group/", null)); + } + static String getCreateJson(String name, String resiliencyMode, double cpu, double memory) { return "{\n" + " \"name\": \"" @@ -171,4 +188,19 @@ Response performOperation(String method, String uriPath, String json) throws IOE } return client().performRequest(request); } + + private void setWlmMode(String mode) throws Exception { + String settingJson = String.format(Locale.ROOT, """ + { + "persistent": { + "wlm.workload_group.mode": "%s" + } + } + """, mode); + + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(settingJson); + Response response = client().performRequest(request); + assertEquals(200, response.getStatusLine().getStatusCode()); + } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProvider.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProvider.java new file mode 100644 index 0000000000000..56b78714c0db5 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProvider.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.wlm.WlmMode; +import org.opensearch.wlm.WorkloadManagementSettings; + +/** + * Central provider for maintaining and supplying the current values of wlm cluster settings. + * This class listens for updates to relevant settings and provides the latest setting values. 
+ */ +public class WlmClusterSettingValuesProvider { + + private volatile WlmMode wlmMode; + + /** + * Constructor for WlmClusterSettingValuesProvider + * @param settings OpenSearch settings + * @param clusterSettings Cluster settings to register update listener + */ + public WlmClusterSettingValuesProvider(Settings settings, ClusterSettings clusterSettings) { + this.wlmMode = WorkloadManagementSettings.WLM_MODE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(WorkloadManagementSettings.WLM_MODE_SETTING, this::setWlmMode); + } + + /** + * Check if WLM mode is ENABLED. + * Throws an IllegalStateException if WLM mode is DISABLED or MONITOR_ONLY. + * @param operationDescription A short text describing the operation, e.g. "create workload group". + */ + public void ensureWlmEnabled(String operationDescription) { + if (wlmMode != WlmMode.ENABLED) { + throw new IllegalStateException( + "Cannot " + + operationDescription + + " because workload management mode is disabled or monitor_only. " + + "To enable this feature, set [wlm.workload_group.mode] to 'enabled' in cluster settings." + ); + } + } + + /** + * Set the latest WLM mode. + * @param mode The wlm mode to set + */ + private void setWlmMode(WlmMode mode) { + this.wlmMode = mode; + } + + /** + * Get the latest WLM mode. + */ + public WlmMode getWlmMode() { + return wlmMode; + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index 9135b12c9cfaf..fea81507633a1 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -75,6 +75,8 @@ import java.util.Map; import java.util.function.Supplier; +import static org.opensearch.rule.service.IndexStoredRulePersistenceService.MAX_WLM_RULES_SETTING; + /** * Plugin class for WorkloadManagement */ @@ -91,6 +93,7 @@ public class WorkloadManagementPlugin extends Plugin implements ActionPlugin, Sy private static FeatureType featureType; private static RulePersistenceService rulePersistenceService; private static RuleRoutingService ruleRoutingService; + private WlmClusterSettingValuesProvider wlmClusterSettingValuesProvider; private AutoTaggingActionFilter autoTaggingActionFilter; /** @@ -112,6 +115,10 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier ) { + wlmClusterSettingValuesProvider = new WlmClusterSettingValuesProvider( + clusterService.getSettings(), + clusterService.getClusterSettings() + ); featureType = new WorkloadGroupFeatureType(new WorkloadGroupFeatureValueValidator(clusterService)); RuleEntityParser parser = new XContentRuleParser(featureType); AttributeValueStoreFactory attributeValueStoreFactory = new AttributeValueStoreFactory( @@ -132,12 +139,10 @@ public Collection createComponents( RefreshBasedSyncMechanism refreshMechanism = new RefreshBasedSyncMechanism( threadPool, clusterService.getSettings(), - clusterService.getClusterSettings(), - parser, - ruleProcessingService, featureType, rulePersistenceService, - new RuleEventClassifier(Collections.emptySet(), ruleProcessingService) + new RuleEventClassifier(Collections.emptySet(), ruleProcessingService), + wlmClusterSettingValuesProvider ); autoTaggingActionFilter = new AutoTaggingActionFilter(ruleProcessingService,
threadPool); @@ -181,16 +186,20 @@ public List getRestHandlers( Supplier nodesInCluster ) { return List.of( - new RestCreateWorkloadGroupAction(), + new RestCreateWorkloadGroupAction(wlmClusterSettingValuesProvider), new RestGetWorkloadGroupAction(), - new RestDeleteWorkloadGroupAction(), - new RestUpdateWorkloadGroupAction() + new RestDeleteWorkloadGroupAction(wlmClusterSettingValuesProvider), + new RestUpdateWorkloadGroupAction(wlmClusterSettingValuesProvider) ); } @Override public List> getSettings() { - return List.of(WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT, RefreshBasedSyncMechanism.RULE_SYNC_REFRESH_INTERVAL_SETTING); + return List.of( + WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT, + RefreshBasedSyncMechanism.RULE_SYNC_REFRESH_INTERVAL_SETTING, + MAX_WLM_RULES_SETTING + ); } @Override diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java index df30f55a99b3c..5cd8892949436 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/package-info.java @@ -7,6 +7,6 @@ */ /** - * Package for the action classes related to query groups in WorkloadManagementPlugin + * Package for the action classes related to workload groups in WorkloadManagementPlugin */ package org.opensearch.plugin.wlm.action; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java index 5ef59602f7893..5cb6a8a582df7 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupAction.java @@ -11,6 +11,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; import org.opensearch.plugin.wlm.action.CreateWorkloadGroupAction; import org.opensearch.plugin.wlm.action.CreateWorkloadGroupRequest; import org.opensearch.plugin.wlm.action.CreateWorkloadGroupResponse; @@ -35,10 +36,15 @@ */ public class RestCreateWorkloadGroupAction extends BaseRestHandler { + private final WlmClusterSettingValuesProvider nonPluginSettingValuesProvider; + /** * Constructor for RestCreateWorkloadGroupAction + * @param nonPluginSettingValuesProvider the settings provider to access the current WLM mode */ - public RestCreateWorkloadGroupAction() {} + public RestCreateWorkloadGroupAction(WlmClusterSettingValuesProvider nonPluginSettingValuesProvider) { + this.nonPluginSettingValuesProvider = nonPluginSettingValuesProvider; + } @Override public String getName() { @@ -55,6 +61,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + nonPluginSettingValuesProvider.ensureWlmEnabled(getName()); try (XContentParser parser = request.contentParser()) { CreateWorkloadGroupRequest createWorkloadGroupRequest = CreateWorkloadGroupRequest.fromXContent(parser); return channel -> client.execute( diff --git 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java index d0d82f43679fa..e1ad166ed6bda 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupAction.java @@ -8,6 +8,7 @@ package org.opensearch.plugin.wlm.rest; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupAction; import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; import org.opensearch.rest.BaseRestHandler; @@ -27,10 +28,15 @@ */ public class RestDeleteWorkloadGroupAction extends BaseRestHandler { + private final WlmClusterSettingValuesProvider nonPluginSettingValuesProvider; + /** * Constructor for RestDeleteWorkloadGroupAction + * @param nonPluginSettingValuesProvider the settings provider to access the current WLM mode */ - public RestDeleteWorkloadGroupAction() {} + public RestDeleteWorkloadGroupAction(WlmClusterSettingValuesProvider nonPluginSettingValuesProvider) { + this.nonPluginSettingValuesProvider = nonPluginSettingValuesProvider; + } @Override public String getName() { @@ -47,6 +53,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + nonPluginSettingValuesProvider.ensureWlmEnabled(getName()); DeleteWorkloadGroupRequest deleteWorkloadGroupRequest = new DeleteWorkloadGroupRequest(request.param("name")); deleteWorkloadGroupRequest.clusterManagerNodeTimeout( request.paramAsTime("cluster_manager_timeout", deleteWorkloadGroupRequest.clusterManagerNodeTimeout()) diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java index db77dc5963037..2e237cf191c75 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupAction.java @@ -11,6 +11,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupAction; import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupRequest; import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupResponse; @@ -35,10 +36,15 @@ */ public class RestUpdateWorkloadGroupAction extends BaseRestHandler { + private final WlmClusterSettingValuesProvider nonPluginSettingValuesProvider; + /** * Constructor for RestUpdateWorkloadGroupAction + * @param nonPluginSettingValuesProvider the settings provider to access the current WLM mode */ - public RestUpdateWorkloadGroupAction() {} + public RestUpdateWorkloadGroupAction(WlmClusterSettingValuesProvider nonPluginSettingValuesProvider) { + this.nonPluginSettingValuesProvider = nonPluginSettingValuesProvider; + } @Override public String getName() { @@ -55,6 +61,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + 
nonPluginSettingValuesProvider.ensureWlmEnabled(getName()); try (XContentParser parser = request.contentParser()) { UpdateWorkloadGroupRequest updateWorkloadGroupRequest = UpdateWorkloadGroupRequest.fromXContent(parser, request.param("name")); return channel -> client.execute( diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java index 889f3e107db07..abefac4fe9675 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/package-info.java @@ -7,6 +7,6 @@ */ /** - * Package for the rest classes related to query groups in WorkloadManagementPlugin + * Package for the rest classes related to workload groups in WorkloadManagementPlugin */ package org.opensearch.plugin.wlm.rest; diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanism.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanism.java index f367e63aa3f51..9bded4c845204 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanism.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanism.java @@ -11,15 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; import org.opensearch.plugin.wlm.rule.sync.detect.RuleEvent; import org.opensearch.plugin.wlm.rule.sync.detect.RuleEventClassifier; -import org.opensearch.rule.InMemoryRuleProcessingService; -import org.opensearch.rule.RuleEntityParser; import org.opensearch.rule.RulePersistenceService; import org.opensearch.rule.action.GetRuleRequest; import org.opensearch.rule.action.GetRuleResponse; @@ -28,7 +26,6 @@ import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; import org.opensearch.wlm.WlmMode; -import org.opensearch.wlm.WorkloadManagementSettings; import java.io.IOException; import java.util.Collections; @@ -65,12 +62,10 @@ public class RefreshBasedSyncMechanism extends AbstractLifecycleComponent { private final ThreadPool threadPool; private long refreshInterval; private volatile Scheduler.Cancellable scheduledFuture; - private final RuleEntityParser parser; - private final InMemoryRuleProcessingService ruleProcessingService; private final RulePersistenceService rulePersistenceService; private final RuleEventClassifier ruleEventClassifier; private final FeatureType featureType; - private WlmMode wlmMode; + private final WlmClusterSettingValuesProvider nonPluginSettingValuesProvider; // This var keeps the Rules which were present during last run of this service private Set lastRunIndexedRules; private static final Logger logger = LogManager.getLogger(RefreshBasedSyncMechanism.class); @@ -80,33 +75,26 @@ public class RefreshBasedSyncMechanism extends AbstractLifecycleComponent { * * @param threadPool * @param settings - * @param clusterSettings - * @param 
parser - * @param ruleProcessingService * @param featureType * @param rulePersistenceService * @param ruleEventClassifier + * @param nonPluginSettingValuesProvider */ public RefreshBasedSyncMechanism( ThreadPool threadPool, Settings settings, - ClusterSettings clusterSettings, - RuleEntityParser parser, - InMemoryRuleProcessingService ruleProcessingService, FeatureType featureType, RulePersistenceService rulePersistenceService, - RuleEventClassifier ruleEventClassifier + RuleEventClassifier ruleEventClassifier, + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider ) { this.threadPool = threadPool; refreshInterval = RULE_SYNC_REFRESH_INTERVAL_SETTING.get(settings); - this.parser = parser; - this.ruleProcessingService = ruleProcessingService; this.featureType = featureType; this.rulePersistenceService = rulePersistenceService; this.lastRunIndexedRules = new HashSet<>(); this.ruleEventClassifier = ruleEventClassifier; - wlmMode = WorkloadManagementSettings.WLM_MODE_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(WorkloadManagementSettings.WLM_MODE_SETTING, this::setWlmMode); + this.nonPluginSettingValuesProvider = nonPluginSettingValuesProvider; } /** @@ -114,7 +102,7 @@ public RefreshBasedSyncMechanism( * but theoretically possible */ synchronized void doRun() { - if (wlmMode != WlmMode.ENABLED) { + if (nonPluginSettingValuesProvider.getWlmMode() != WlmMode.ENABLED) { return; } @@ -161,8 +149,4 @@ protected void doClose() throws IOException { scheduledFuture.cancel(); } } - - void setWlmMode(WlmMode mode) { - this.wlmMode = mode; - } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java index 8fd5fe5dfcfed..f37e90509c0fb 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceService.java @@ -54,7 +54,7 @@ public class WorkloadGroupPersistenceService { /** * max WorkloadGroup count setting name */ - public static final String QUERY_GROUP_COUNT_SETTING_NAME = "node.workload_group.max_count"; + public static final String WORKLOAD_GROUP_COUNT_SETTING_NAME = "node.workload_group.max_count"; /** * default max workloadGroup count on any node at any given point in time */ @@ -67,7 +67,7 @@ public class WorkloadGroupPersistenceService { * max WorkloadGroup count setting */ public static final Setting MAX_QUERY_GROUP_COUNT = Setting.intSetting( - QUERY_GROUP_COUNT_SETTING_NAME, + WORKLOAD_GROUP_COUNT_SETTING_NAME, DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE, 0, WorkloadGroupPersistenceService::validateMaxWorkloadGroupCount, @@ -116,7 +116,7 @@ public void setMaxWorkloadGroupCount(int newMaxWorkloadGroupCount) { */ private static void validateMaxWorkloadGroupCount(int maxWorkloadGroupCount) { if (maxWorkloadGroupCount > DEFAULT_MAX_QUERY_GROUP_COUNT_VALUE || maxWorkloadGroupCount < MIN_QUERY_GROUP_COUNT_VALUE) { - throw new IllegalArgumentException(QUERY_GROUP_COUNT_SETTING_NAME + " should be in range [1-100]."); + throw new IllegalArgumentException(WORKLOAD_GROUP_COUNT_SETTING_NAME + " should be in range [1-100]."); } } @@ -162,7 +162,7 @@ ClusterState saveWorkloadGroupInClusterState(final WorkloadGroup workloadGroup, // check if maxWorkloadGroupCount will breach if (existingWorkloadGroups.size() == 
maxWorkloadGroupCount) { - logger.warn("{} value exceeded its assigned limit of {}.", QUERY_GROUP_COUNT_SETTING_NAME, maxWorkloadGroupCount); + logger.warn("{} value exceeded its assigned limit of {}.", WORKLOAD_GROUP_COUNT_SETTING_NAME, maxWorkloadGroupCount); throw new IllegalStateException("Can't create more than " + maxWorkloadGroupCount + " WorkloadGroups in the system."); } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java index e8c88ee656dc7..ac01ed9fb4923 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/package-info.java @@ -7,6 +7,6 @@ */ /** - * Package for the service classes related to query groups in WorkloadManagementPlugin + * Package for the service classes related to workload groups in WorkloadManagementPlugin */ package org.opensearch.plugin.wlm.service; diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProviderTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProviderTests.java new file mode 100644 index 0000000000000..4dd4b423abc28 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WlmClusterSettingValuesProviderTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.WorkloadManagementSettings; +import org.junit.Before; + +import java.util.HashSet; + +import static org.mockito.Mockito.spy; + +public class WlmClusterSettingValuesProviderTests extends OpenSearchTestCase { + + private ClusterSettings clusterSettings; + + @Before + public void setUp() throws Exception { + super.setUp(); + try (WorkloadManagementPlugin plugin = new WorkloadManagementPlugin()) { + clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(plugin.getSettings())); + clusterSettings.registerSetting(WorkloadManagementSettings.WLM_MODE_SETTING); + } + } + + public void testEnsureWlmEnabledThrowsWhenDisabled() { + WlmClusterSettingValuesProvider spyProvider = createSpyProviderWithMode("disabled"); + assertThrows(IllegalStateException.class, () -> spyProvider.ensureWlmEnabled("")); + } + + public void testEnsureWlmEnabledThrowsWhenMonitorOnly() { + WlmClusterSettingValuesProvider spyProvider = createSpyProviderWithMode("monitor_only"); + assertThrows(IllegalStateException.class, () -> spyProvider.ensureWlmEnabled("")); + } + + public void testEnsureWlmEnabledSucceedsWhenEnabled() { + WlmClusterSettingValuesProvider spyProvider = createSpyProviderWithMode("enabled"); + spyProvider.ensureWlmEnabled(""); + } + + private WlmClusterSettingValuesProvider createSpyProviderWithMode(String mode) { + Settings settings = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), mode).build(); + WlmClusterSettingValuesProvider realProvider = new WlmClusterSettingValuesProvider(settings, clusterSettings); + return spy(realProvider); + } +} diff --git 
a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementPluginTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementPluginTests.java index 9de5c94a24bf3..57bb1e33d2373 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementPluginTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementPluginTests.java @@ -138,7 +138,7 @@ public void testGetFeatureTypeReturnsWorkloadGroupFeatureType() { assertEquals("workload_group", featureType.getName()); } - public void testGetSettingsIncludesMaxQueryGroupCount() { + public void testGetSettingsIncludesMaxWorkloadGroupCount() { List settings = plugin.getSettings(); assertTrue(settings.contains(WorkloadGroupPersistenceService.MAX_QUERY_GROUP_COUNT)); } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementTestUtils.java similarity index 88% rename from plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java rename to plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementTestUtils.java index bac644a172c1e..60eee025e1c02 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadGroupTestUtils.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/WorkloadManagementTestUtils.java @@ -19,10 +19,12 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.wlm.rule.sync.RefreshBasedSyncMechanism; import org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.wlm.MutableWorkloadGroupFragment; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadManagementSettings; import java.util.ArrayList; import java.util.Collection; @@ -38,7 +40,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -public class WorkloadGroupTestUtils { +public class WorkloadManagementTestUtils { public static final String NAME_ONE = "workload_group_one"; public static final String NAME_TWO = "workload_group_two"; public static final String _ID_ONE = "AgfUO5Ja9yfsYlONlYi3TQ=="; @@ -165,4 +167,16 @@ public static void assertEqualWorkloadGroups( } } } + + public static WlmClusterSettingValuesProvider setUpNonPluginSettingValuesProvider(String wlmMode) throws Exception { + try (WorkloadManagementPlugin plugin = new WorkloadManagementPlugin()) { + Settings settings = Settings.builder() + .put(RefreshBasedSyncMechanism.RULE_SYNC_REFRESH_INTERVAL_SETTING_NAME, 1000) + .put(WorkloadManagementSettings.WLM_MODE_SETTING_NAME, wlmMode) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(plugin.getSettings())); + clusterSettings.registerSetting(WorkloadManagementSettings.WLM_MODE_SETTING); + return new WlmClusterSettingValuesProvider(settings, clusterSettings); + } + } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java index 31d3ea00b7bda..a0addd1e7b412 100644 --- 
a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupRequestTests.java @@ -17,8 +17,8 @@ import java.util.ArrayList; import java.util.List; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.assertEqualWorkloadGroups; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.assertEqualWorkloadGroups; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupOne; public class CreateWorkloadGroupRequestTests extends OpenSearchTestCase { diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java index d25050341f997..c741daf458268 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateWorkloadGroupResponseTests.java @@ -15,7 +15,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -30,7 +30,7 @@ public class CreateWorkloadGroupResponseTests extends OpenSearchTestCase { * Test case to verify serialization and deserialization of CreateWorkloadGroupResponse. 
*/ public void testSerialization() throws IOException { - CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupOne, RestStatus.OK); + CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadManagementTestUtils.workloadGroupOne, RestStatus.OK); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); @@ -42,7 +42,7 @@ public void testSerialization() throws IOException { List listTwo = new ArrayList<>(); listOne.add(responseGroup); listTwo.add(otherResponseGroup); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); } /** @@ -50,7 +50,7 @@ public void testSerialization() throws IOException { */ public void testToXContentCreateWorkloadGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); - CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupOne, RestStatus.OK); + CreateWorkloadGroupResponse response = new CreateWorkloadGroupResponse(WorkloadManagementTestUtils.workloadGroupOne, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); String expected = "{\n" + " \"_id\" : \"AgfUO5Ja9yfsYlONlYi3TQ==\",\n" diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java index a7fa0939583c5..0ab32e537d5c5 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteWorkloadGroupRequestTests.java @@ -11,7 +11,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -22,8 +22,8 @@ public class DeleteWorkloadGroupRequestTests extends OpenSearchTestCase { * Test case to verify the serialization and deserialization of DeleteWorkloadGroupRequest. 
*/ public void testSerialization() throws IOException { - DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest(WorkloadGroupTestUtils.NAME_ONE); - assertEquals(WorkloadGroupTestUtils.NAME_ONE, request.getName()); + DeleteWorkloadGroupRequest request = new DeleteWorkloadGroupRequest(WorkloadManagementTestUtils.NAME_ONE); + assertEquals(WorkloadManagementTestUtils.NAME_ONE, request.getName()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java index 832761d5084bb..675b34228669c 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupRequestTests.java @@ -10,7 +10,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -21,8 +21,8 @@ public class GetWorkloadGroupRequestTests extends OpenSearchTestCase { * Test case to verify the serialization and deserialization of GetWorkloadGroupRequest. */ public void testSerialization() throws IOException { - GetWorkloadGroupRequest request = new GetWorkloadGroupRequest(WorkloadGroupTestUtils.NAME_ONE); - assertEquals(WorkloadGroupTestUtils.NAME_ONE, request.getName()); + GetWorkloadGroupRequest request = new GetWorkloadGroupRequest(WorkloadManagementTestUtils.NAME_ONE); + assertEquals(WorkloadManagementTestUtils.NAME_ONE, request.getName()); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java index dc0aeabc7a033..ffc2fa1f2ca4f 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/GetWorkloadGroupResponseTests.java @@ -15,7 +15,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -31,7 +31,7 @@ public class GetWorkloadGroupResponseTests extends OpenSearchTestCase { */ public void testSerializationSingleWorkloadGroup() throws IOException { List list = new ArrayList<>(); - list.add(WorkloadGroupTestUtils.workloadGroupOne); + list.add(WorkloadManagementTestUtils.workloadGroupOne); GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(list, RestStatus.OK); assertEquals(response.getWorkloadGroups(), list); @@ -41,15 +41,15 @@ public void testSerializationSingleWorkloadGroup() throws IOException { GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(streamInput); 
assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); } /** * Test case to verify the serialization and deserialization of GetWorkloadGroupResponse when the result contains multiple WorkloadGroups. */ public void testSerializationMultipleWorkloadGroup() throws IOException { - GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(WorkloadGroupTestUtils.workloadGroupList(), RestStatus.OK); - assertEquals(response.getWorkloadGroups(), WorkloadGroupTestUtils.workloadGroupList()); + GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(WorkloadManagementTestUtils.workloadGroupList(), RestStatus.OK); + assertEquals(response.getWorkloadGroups(), WorkloadManagementTestUtils.workloadGroupList()); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); @@ -58,7 +58,7 @@ public void testSerializationMultipleWorkloadGroup() throws IOException { GetWorkloadGroupResponse otherResponse = new GetWorkloadGroupResponse(streamInput); assertEquals(response.getRestStatus(), otherResponse.getRestStatus()); assertEquals(2, otherResponse.getWorkloadGroups().size()); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(response.getWorkloadGroups(), otherResponse.getWorkloadGroups(), false); } /** @@ -83,7 +83,7 @@ public void testSerializationNull() throws IOException { */ public void testToXContentGetSingleWorkloadGroup() throws IOException { List workloadGroupList = new ArrayList<>(); - workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupOne); + workloadGroupList.add(WorkloadManagementTestUtils.workloadGroupOne); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(workloadGroupList, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); @@ -108,8 +108,8 @@ public void testToXContentGetSingleWorkloadGroup() throws IOException { */ public void testToXContentGetMultipleWorkloadGroup() throws IOException { List workloadGroupList = new ArrayList<>(); - workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupOne); - workloadGroupList.add(WorkloadGroupTestUtils.workloadGroupTwo); + workloadGroupList.add(WorkloadManagementTestUtils.workloadGroupOne); + workloadGroupList.add(WorkloadManagementTestUtils.workloadGroupTwo); XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); GetWorkloadGroupResponse response = new GetWorkloadGroupResponse(workloadGroupList, RestStatus.OK); String actual = response.toXContent(builder, mock(ToXContent.Params.class)).toString(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java index cf12d9f6408cf..311eda7728d25 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportGetWorkloadGroupActionTests.java @@ -17,9 +17,9 @@ import 
org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_NONE_EXISTED; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterState; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.clusterState; import static org.mockito.Mockito.mock; public class TransportGetWorkloadGroupActionTests extends OpenSearchTestCase { diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java index e8d883da5c6eb..e45a0b0b6cfff 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupRequestTests.java @@ -19,8 +19,8 @@ import java.util.HashMap; import java.util.Map; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupOne; public class UpdateWorkloadGroupRequestTests extends OpenSearchTestCase { diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java index 97b9b9029373f..44437ee10aa1e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/UpdateWorkloadGroupResponseTests.java @@ -15,14 +15,14 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupOne; import static org.mockito.Mockito.mock; public class UpdateWorkloadGroupResponseTests extends OpenSearchTestCase { @@ -43,7 +43,7 @@ public void testSerialization() throws IOException { List list2 = new ArrayList<>(); list1.add(responseGroup); list2.add(otherResponseGroup); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(list1, list2, false); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(list1, list2, false); } /** diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupActionTests.java new file mode 100644 index 0000000000000..abd5135f5f529 --- /dev/null +++ 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestCreateWorkloadGroupActionTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.node.NodeClient; + +import static org.mockito.Mockito.mock; + +public class RestCreateWorkloadGroupActionTests extends OpenSearchTestCase { + + public void testPrepareRequestThrowsWhenWlmModeDisabled() { + try { + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider = WorkloadManagementTestUtils + .setUpNonPluginSettingValuesProvider("disabled"); + RestCreateWorkloadGroupAction restCreateWorkloadGroupAction = new RestCreateWorkloadGroupAction(nonPluginSettingValuesProvider); + restCreateWorkloadGroupAction.prepareRequest(mock(RestRequest.class), mock(NodeClient.class)); + fail("Expected exception when WLM mode is DISABLED"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("create")); + } + } + + public void testPrepareRequestThrowsWhenWlmModeMonitorOnly() { + try { + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider = WorkloadManagementTestUtils + .setUpNonPluginSettingValuesProvider("monitor_only"); + RestCreateWorkloadGroupAction restCreateWorkloadGroupAction = new RestCreateWorkloadGroupAction(nonPluginSettingValuesProvider); + restCreateWorkloadGroupAction.prepareRequest(mock(RestRequest.class), mock(NodeClient.class)); + fail("Expected exception when WLM mode is MONITOR_ONLY"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("create")); + } + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java index 8ce5c869f4481..c54313af62bae 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteWorkloadGroupActionTests.java @@ -11,6 +11,8 @@ import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupAction; import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; import org.opensearch.rest.RestChannel; @@ -25,7 +27,7 @@ import org.mockito.ArgumentCaptor; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_ONE; import static org.opensearch.rest.RestRequest.Method.DELETE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -40,7 +42,7 @@ public class RestDeleteWorkloadGroupActionTests extends OpenSearchTestCase { * Test case to validate the construction for RestDeleteWorkloadGroupAction */ public void testConstruction() { - 
RestDeleteWorkloadGroupAction action = new RestDeleteWorkloadGroupAction(); + RestDeleteWorkloadGroupAction action = new RestDeleteWorkloadGroupAction(mock(WlmClusterSettingValuesProvider.class)); assertNotNull(action); assertEquals("delete_workload_group", action.getName()); List routes = action.routes(); @@ -55,7 +57,9 @@ public void testConstruction() { */ @SuppressWarnings("unchecked") public void testPrepareRequest() throws Exception { - RestDeleteWorkloadGroupAction restDeleteWorkloadGroupAction = new RestDeleteWorkloadGroupAction(); + RestDeleteWorkloadGroupAction restDeleteWorkloadGroupAction = new RestDeleteWorkloadGroupAction( + mock(WlmClusterSettingValuesProvider.class) + ); NodeClient nodeClient = mock(NodeClient.class); RestRequest realRequest = new FakeRestRequest(); realRequest.params().put("name", NAME_ONE); @@ -82,4 +86,16 @@ public void testPrepareRequest() throws Exception { any(RestToXContentListener.class) ); } + + public void testPrepareRequestThrowsWhenWlmModeDisabled() throws Exception { + try { + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider = WorkloadManagementTestUtils + .setUpNonPluginSettingValuesProvider("disabled"); + RestDeleteWorkloadGroupAction restDeleteWorkloadGroupAction = new RestDeleteWorkloadGroupAction(nonPluginSettingValuesProvider); + restDeleteWorkloadGroupAction.prepareRequest(mock(RestRequest.class), mock(NodeClient.class)); + fail("Expected exception when WLM mode is DISABLED"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("delete")); + } + } } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupActionTests.java new file mode 100644 index 0000000000000..ba6ddb1a947a8 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestUpdateWorkloadGroupActionTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.client.node.NodeClient; + +import static org.mockito.Mockito.mock; + +public class RestUpdateWorkloadGroupActionTests extends OpenSearchTestCase { + + public void testPrepareRequestThrowsWhenWlmModeDisabled() { + try { + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider = WorkloadManagementTestUtils + .setUpNonPluginSettingValuesProvider("disabled"); + RestUpdateWorkloadGroupAction restUpdateWorkloadGroupAction = new RestUpdateWorkloadGroupAction(nonPluginSettingValuesProvider); + restUpdateWorkloadGroupAction.prepareRequest(mock(RestRequest.class), mock(NodeClient.class)); + fail("Expected exception when WLM mode is DISABLED"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("update")); + } + } + + public void testPrepareRequestThrowsWhenWlmModeMonitorOnly() { + try { + WlmClusterSettingValuesProvider nonPluginSettingValuesProvider = WorkloadManagementTestUtils + .setUpNonPluginSettingValuesProvider("monitor_only"); + RestUpdateWorkloadGroupAction restUpdateWorkloadGroupAction = new RestUpdateWorkloadGroupAction(nonPluginSettingValuesProvider); + restUpdateWorkloadGroupAction.prepareRequest(mock(RestRequest.class), mock(NodeClient.class)); + fail("Expected exception when WLM mode is MONITOR_ONLY"); + } catch (Exception e) { + assertTrue(e.getMessage().contains("update")); + } + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanismTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanismTests.java index cca14c8778a87..739122e022bcb 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanismTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rule/sync/RefreshBasedSyncMechanismTests.java @@ -12,10 +12,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.AutoTaggingActionFilterTests; +import org.opensearch.plugin.wlm.WlmClusterSettingValuesProvider; import org.opensearch.plugin.wlm.WorkloadManagementPlugin; import org.opensearch.plugin.wlm.rule.sync.detect.RuleEventClassifier; import org.opensearch.rule.InMemoryRuleProcessingService; -import org.opensearch.rule.RuleEntityParser; import org.opensearch.rule.RulePersistenceService; import org.opensearch.rule.action.GetRuleRequest; import org.opensearch.rule.action.GetRuleResponse; @@ -24,12 +24,10 @@ import org.opensearch.rule.autotagging.Rule; import org.opensearch.rule.storage.AttributeValueStoreFactory; import org.opensearch.rule.storage.DefaultAttributeValueStore; -import org.opensearch.rule.storage.XContentRuleParser; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.client.Client; -import org.opensearch.wlm.WlmMode; import org.opensearch.wlm.WorkloadManagementSettings; import java.io.IOException; @@ -59,6 +57,8 @@ public class RefreshBasedSyncMechanismTests extends OpenSearchTestCase { Scheduler.Cancellable scheduledFuture; RuleEventClassifier ruleEventClassifier; FeatureType featureType; + 
WlmClusterSettingValuesProvider nonPluginSettingValuesProvider; + ClusterSettings clusterSettings; @Override public void setUp() throws Exception { @@ -68,7 +68,7 @@ public void setUp() throws Exception { .put(RefreshBasedSyncMechanism.RULE_SYNC_REFRESH_INTERVAL_SETTING_NAME, 1000) .put(WorkloadManagementSettings.WLM_MODE_SETTING_NAME, "enabled") .build(); - ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(plugin.getSettings())); + clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(plugin.getSettings())); clusterSettings.registerSetting(WorkloadManagementSettings.WLM_MODE_SETTING); featureType = mock(FeatureType.class); mockThreadPool = mock(ThreadPool.class); @@ -76,7 +76,7 @@ public void setUp() throws Exception { rulePersistenceService = mock(RulePersistenceService.class); ruleEventClassifier = new RuleEventClassifier(Collections.emptySet(), ruleProcessingService); attributeValueStoreFactory = new AttributeValueStoreFactory(featureType, DefaultAttributeValueStore::new); - RuleEntityParser parser = new XContentRuleParser(featureType); + nonPluginSettingValuesProvider = new WlmClusterSettingValuesProvider(settings, clusterSettings); mockClient = mock(Client.class); scheduledFuture = mock(Scheduler.Cancellable.class); when(mockThreadPool.scheduleWithFixedDelay(any(), any(), any())).thenReturn(scheduledFuture); @@ -84,12 +84,10 @@ public void setUp() throws Exception { sut = new RefreshBasedSyncMechanism( mockThreadPool, settings, - clusterSettings, - parser, - ruleProcessingService, featureType, rulePersistenceService, - ruleEventClassifier + ruleEventClassifier, + nonPluginSettingValuesProvider ); } } @@ -123,7 +121,19 @@ public void testDoClose() throws IOException { */ @SuppressWarnings("unchecked") public void testDoRunWhenWLM_isDisabled() { - sut.setWlmMode(WlmMode.DISABLED); + Settings disabledSettings = Settings.builder() + .put(RefreshBasedSyncMechanism.RULE_SYNC_REFRESH_INTERVAL_SETTING_NAME, 1000) + .put(WorkloadManagementSettings.WLM_MODE_SETTING_NAME, "disabled") + .build(); + WlmClusterSettingValuesProvider disabledWlmModeProvider = new WlmClusterSettingValuesProvider(disabledSettings, clusterSettings); + sut = new RefreshBasedSyncMechanism( + mockThreadPool, + disabledSettings, + featureType, + rulePersistenceService, + ruleEventClassifier, + disabledWlmModeProvider + ); sut.doRun(); verify(rulePersistenceService, times(0)).getRule(any(GetRuleRequest.class), any(ActionListener.class)); } diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java index 571103b32205d..51911b2b67df8 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/WorkloadGroupPersistenceServiceTests.java @@ -21,7 +21,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; -import org.opensearch.plugin.wlm.WorkloadGroupTestUtils; +import org.opensearch.plugin.wlm.WorkloadManagementTestUtils; import org.opensearch.plugin.wlm.action.CreateWorkloadGroupResponse; import org.opensearch.plugin.wlm.action.DeleteWorkloadGroupRequest; import org.opensearch.plugin.wlm.action.UpdateWorkloadGroupRequest; @@ -44,23 +44,23 @@ 
import org.mockito.ArgumentCaptor; import static org.opensearch.cluster.metadata.WorkloadGroup.builder; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_NONE_EXISTED; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_ONE; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.NAME_TWO; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils._ID_ONE; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils._ID_TWO; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.assertEqualWorkloadGroups; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterSettings; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterSettingsSet; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.clusterState; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.preparePersistenceServiceSetup; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupList; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupOne; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupPersistenceService; -import static org.opensearch.plugin.wlm.WorkloadGroupTestUtils.workloadGroupTwo; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_NONE_EXISTED; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.NAME_TWO; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils._ID_ONE; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils._ID_TWO; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.assertEqualWorkloadGroups; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.clusterSettings; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.clusterSettingsSet; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.clusterState; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.preparePersistenceServiceSetup; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupList; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupOne; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupPersistenceService; +import static org.opensearch.plugin.wlm.WorkloadManagementTestUtils.workloadGroupTwo; import static org.opensearch.plugin.wlm.action.WorkloadGroupActionTestUtils.updateWorkloadGroupRequest; -import static org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; import static org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService.SOURCE; +import static org.opensearch.plugin.wlm.service.WorkloadGroupPersistenceService.WORKLOAD_GROUP_COUNT_SETTING_NAME; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyString; @@ -155,7 +155,7 @@ public void testCreateWorkloadGroupOverflowCount() { .updatedAt(1690934400000L) .build(); Metadata metadata = Metadata.builder().workloadGroups(Map.of(_ID_ONE, workloadGroupOne, _ID_TWO, workloadGroupTwo)).build(); - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + Settings settings = Settings.builder().put(WORKLOAD_GROUP_COUNT_SETTING_NAME, 2).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); ClusterService 
clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); @@ -174,7 +174,7 @@ public void testCreateWorkloadGroupOverflowCount() { * Tests the invalid value of {@code node.workload_group.max_count} */ public void testInvalidMaxWorkloadGroupCount() { - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 2).build(); + Settings settings = Settings.builder().put(WORKLOAD_GROUP_COUNT_SETTING_NAME, 2).build(); ClusterSettings clusterSettings = new ClusterSettings(settings, clusterSettingsSet()); ClusterService clusterService = new ClusterService(settings, clusterSettings, mock(ThreadPool.class)); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( @@ -189,7 +189,7 @@ public void testInvalidMaxWorkloadGroupCount() { * Tests the valid value of {@code node.workload_group.max_count} */ public void testValidMaxSandboxCountSetting() { - Settings settings = Settings.builder().put(QUERY_GROUP_COUNT_SETTING_NAME, 100).build(); + Settings settings = Settings.builder().put(WORKLOAD_GROUP_COUNT_SETTING_NAME, 100).build(); ClusterService clusterService = new ClusterService(settings, clusterSettings(), mock(ThreadPool.class)); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, @@ -209,7 +209,7 @@ public void testPersistInClusterStateMetadata() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); workloadGroupPersistenceService.persistInClusterStateMetadata(workloadGroupOne, listener); @@ -225,7 +225,7 @@ public void testPersistInClusterStateMetadataInner() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); ArgumentCaptor captor = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); @@ -252,7 +252,7 @@ public void testPersistInClusterStateMetadataFailure() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); doAnswer(invocation -> { @@ -275,23 +275,23 @@ public void testGetSingleWorkloadGroup() { WorkloadGroup workloadGroup = groups.get(0); List listOne = new ArrayList<>(); List listTwo = new ArrayList<>(); - listOne.add(WorkloadGroupTestUtils.workloadGroupOne); + listOne.add(WorkloadManagementTestUtils.workloadGroupOne); listTwo.add(workloadGroup); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(listOne, listTwo, false); } /** * Tests getting all WorkloadGroups */ public void testGetAllWorkloadGroups() { - assertEquals(2, WorkloadGroupTestUtils.clusterState().metadata().workloadGroups().size()); + assertEquals(2, WorkloadManagementTestUtils.clusterState().metadata().workloadGroups().size()); Collection groupsCollections = WorkloadGroupPersistenceService.getFromClusterStateMetadata(null, 
clusterState()); List res = new ArrayList<>(groupsCollections); assertEquals(2, res.size()); Set currentNAME = res.stream().map(WorkloadGroup::getName).collect(Collectors.toSet()); - assertTrue(currentNAME.contains(WorkloadGroupTestUtils.NAME_ONE)); - assertTrue(currentNAME.contains(WorkloadGroupTestUtils.NAME_TWO)); - WorkloadGroupTestUtils.assertEqualWorkloadGroups(WorkloadGroupTestUtils.workloadGroupList(), res, false); + assertTrue(currentNAME.contains(WorkloadManagementTestUtils.NAME_ONE)); + assertTrue(currentNAME.contains(WorkloadManagementTestUtils.NAME_TWO)); + WorkloadManagementTestUtils.assertEqualWorkloadGroups(WorkloadManagementTestUtils.workloadGroupList(), res, false); } /** @@ -312,9 +312,9 @@ public void testGetNonExistedWorkloadGroups() { public void testMaxWorkloadGroupCount() { assertThrows( IllegalArgumentException.class, - () -> WorkloadGroupTestUtils.workloadGroupPersistenceService().setMaxWorkloadGroupCount(-1) + () -> WorkloadManagementTestUtils.workloadGroupPersistenceService().setMaxWorkloadGroupCount(-1) ); - WorkloadGroupPersistenceService workloadGroupPersistenceService = WorkloadGroupTestUtils.workloadGroupPersistenceService(); + WorkloadGroupPersistenceService workloadGroupPersistenceService = WorkloadManagementTestUtils.workloadGroupPersistenceService(); workloadGroupPersistenceService.setMaxWorkloadGroupCount(50); assertEquals(50, workloadGroupPersistenceService.getMaxWorkloadGroupCount()); } @@ -353,7 +353,7 @@ public void testDeleteInClusterStateMetadata() throws Exception { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); doAnswer(invocation -> { @@ -467,7 +467,7 @@ public void testUpdateInClusterStateMetadata() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); workloadGroupPersistenceService.updateInClusterStateMetadata(null, listener); @@ -483,7 +483,7 @@ public void testUpdateInClusterStateMetadataInner() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( @@ -514,7 +514,7 @@ public void testUpdateInClusterStateMetadataFailure() { ActionListener listener = mock(ActionListener.class); WorkloadGroupPersistenceService workloadGroupPersistenceService = new WorkloadGroupPersistenceService( clusterService, - WorkloadGroupTestUtils.settings(), + WorkloadManagementTestUtils.settings(), clusterSettings() ); UpdateWorkloadGroupRequest updateWorkloadGroupRequest = updateWorkloadGroupRequest( diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml index 178639638890d..a9ba5d300c9fa 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml +++ 
b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_workload_group.yml @@ -3,6 +3,13 @@ version: " - 2.16.99" reason: "QueryGroup WorkloadManagement feature was added in 2.17" + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + wlm.workload_group.mode: "enabled" + - do: create_workload_group_context: body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml index 08a20df093c01..4094e2f08eb1a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml @@ -101,3 +101,101 @@ setup: - match: { hits.hits.0.matched_queries.match_field_2: 10 } - length: { hits.hits.1.matched_queries: 1 } - match: { hits.hits.1.matched_queries.match_field_1: 1 } + +--- + +"named queries in rescore": + - skip: + version: " - 3.1.99" + reason: "named queries in rescore is supported in 3.2.0 and above" + + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1, "title": "hello world" }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : 2, "title": "hello universe" }' + + - do: + search: + index: test_1 + body: + query: + match: { + field: { + query: 1, + _name: main_query + } + } + rescore: + window_size: 10 + query: + rescore_query: + match: { + title: { + query: "hello", + _name: rescore_query + } + } + query_weight: 0.5 + rescore_query_weight: 1.5 + + - match: { hits.total.value: 1 } + - length: { hits.hits.0.matched_queries: 2 } + - match: { hits.hits.0.matched_queries: [ "main_query", "rescore_query" ] } + +--- + +"named queries in rescore with scores": + - skip: + version: " - 3.1.99" + reason: "named queries in rescore is supported in 3.2.0 and above" + + - do: + indices.create: + index: test + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1, "title": "hello world" }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : 2, "title": "hello universe" }' + + - do: + search: + include_named_queries_score: true + index: test_1 + body: + query: + match: { + field: { + query: 1, + _name: main_query + } + } + rescore: + window_size: 10 + query: + rescore_query: + match: { + title: { + query: "hello", + _name: rescore_query + } + } + query_weight: 0.5 + rescore_query_weight: 1.5 + + - match: { hits.total.value: 1 } + - length: { hits.hits.0.matched_queries: 2 } + - gte: { hits.hits.0.matched_queries.main_query: 0.0 } + - gte: { hits.hits.0.matched_queries.rescore_query: 0.0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/400_max_score.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/400_max_score.yml new file mode 100644 index 0000000000000..f81aafb3150de --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/400_max_score.yml @@ -0,0 +1,89 @@ +setup: + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + foo: + type: keyword + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - foo: bar + - index: + _index: test_1 + - foo: bar + + - do: + indices.refresh: + index: [test_1] + +--- +teardown: + - do: + indices.delete: + index: test_1 + +# related issue: 
https://github.com/opensearch-project/OpenSearch/issues/18714 +--- +"Test max score with sorting on score firstly": + - skip: + version: " - 3.2.0" + reason: Fixed in 3.2.0 + + - do: + search: + index: test_1 + body: + query: { term: { foo: bar} } + sort: [{ _score: desc }, { _doc: desc }] + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { max_score: 1.0 } + + - do: + search: + index: test_1 + body: + query: { term: { foo: bar} } + sort: [{ _score: asc }, { _doc: desc }] + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { max_score: null } + +--- +"Test max score with sorting on score firstly with concurrent segment search enabled": + - skip: + version: " - 3.2.0" + reason: Fixed in 3.2.0 + + - do: + indices.put_settings: + index: test_1 + body: + index.search.concurrent_segment_search.mode: 'all' + + - do: + search: + index: test_1 + body: + query: { term: { foo: bar} } + sort: [{ _score: desc }, { _doc: desc }] + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { max_score: 1.0 } + + - do: + search: + index: test_1 + body: + query: { term: { foo: bar} } + sort: [{ _score: asc }, { _doc: desc }] + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { max_score: null } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/61_query_string_field_alias.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/61_query_string_field_alias.yml new file mode 100644 index 0000000000000..64926b1333174 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/61_query_string_field_alias.yml @@ -0,0 +1,56 @@ +setup: + - skip: + version: " - 3.1.99" + reason: "regex query over field alias support starts 3.2" + + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + test: + type: text + test_alias: + type: alias + path: test + + - do: + bulk: + refresh: true + body: | + {"index":{"_index":"test_index","_id":"1"}} + {"test":"hello"} + {"index":{"_index":"test_index","_id":"2"}} + {"test":"world"} + +--- +"regex search on normal field": + - do: + search: + rest_total_hits_as_int: true + index: test_index + body: + query: + query_string: + query: "test: /h[a-z].*/" + + - match: {hits.total: 1} + - match: {hits.hits.0._id: "1"} + +--- +"regex search on alias field": + - do: + search: + rest_total_hits_as_int: true + index: test_index + body: + query: + query_string: + query: "test_alias: /h[a-z].*/" + + - match: {hits.total: 1} + - match: {hits.hits.0._id: "1"} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerIT.java index bd17d82c4d46e..142e2da95653e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerIT.java @@ -45,11 +45,6 @@ public class AutoForceMergeManagerIT extends RemoteStoreBaseIntegTestCase { private static final String MERGE_DELAY = "1s"; private static final Integer SEGMENT_COUNT = 1; - @Override - protected boolean addMockIndexStorePlugin() { - return false; - } - @Override protected Settings nodeSettings(int nodeOrdinal) { ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); @@ -158,8 +153,8 @@ public void testAutoForceMergeTriggeringBasicWithOneShard() throws 
Exception { SegmentsStats segmentsStatsBefore = shard.segmentStats(false, false); waitUntil(() -> shard.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES); SegmentsStats segmentsStatsAfter = shard.segmentStats(false, false); - // assertTrue((int) segmentsStatsBefore.getCount() > segmentsStatsAfter.getCount()); - // assertEquals((int) SEGMENT_COUNT, segmentsStatsAfter.getCount()); + assertTrue((int) segmentsStatsBefore.getCount() > segmentsStatsAfter.getCount()); + assertEquals((int) SEGMENT_COUNT, segmentsStatsAfter.getCount()); assertAcked(client().admin().indices().prepareDelete(INDEX_NAME_1).get()); } @@ -221,11 +216,11 @@ public void testAutoForceMergeTriggeringBasicWithFiveShardsOfTwoIndex() throws E SegmentsStats segmentsStatsForShard3Before = shard3.segmentStats(false, false); SegmentsStats segmentsStatsForShard4Before = shard4.segmentStats(false, false); SegmentsStats segmentsStatsForShard5Before = shard5.segmentStats(false, false); - AtomicLong totalSegments = new AtomicLong( + AtomicLong totalSegmentsBefore = new AtomicLong( segmentsStatsForShard1Before.getCount() + segmentsStatsForShard2Before.getCount() + segmentsStatsForShard3Before.getCount() + segmentsStatsForShard4Before.getCount() + segmentsStatsForShard5Before.getCount() ); - assertTrue(totalSegments.get() > 5); + assertTrue(totalSegmentsBefore.get() > 5); waitUntil(() -> shard1.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES); waitUntil(() -> shard2.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES); waitUntil(() -> shard3.segmentStats(false, false).getCount() == SEGMENT_COUNT, 1, TimeUnit.MINUTES); @@ -236,11 +231,11 @@ public void testAutoForceMergeTriggeringBasicWithFiveShardsOfTwoIndex() throws E SegmentsStats segmentsStatsForShard3After = shard3.segmentStats(false, false); SegmentsStats segmentsStatsForShard4After = shard4.segmentStats(false, false); SegmentsStats segmentsStatsForShard5After = shard5.segmentStats(false, false); - totalSegments.set( + AtomicLong totalSegmentsAfter = new AtomicLong( segmentsStatsForShard1After.getCount() + segmentsStatsForShard2After.getCount() + segmentsStatsForShard3After.getCount() + segmentsStatsForShard4After.getCount() + segmentsStatsForShard5After.getCount() ); - // assertEquals(5, totalSegments.get()); + assertTrue(totalSegmentsBefore.get() > totalSegmentsAfter.get()); assertAcked(client().admin().indices().prepareDelete(INDEX_NAME_1).get()); assertAcked(client().admin().indices().prepareDelete(INDEX_NAME_2).get()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java index 2f0b1e5b07f16..91754f069217c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/WarmIndexSegmentReplicationIT.java @@ -517,6 +517,7 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount + 1); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/18157") public void testScrollWithConcurrentIndexAndSearch() throws Exception { final String primary = internalCluster().startDataAndWarmNodes(1).get(0); final String replica = 
internalCluster().startDataAndWarmNodes(1).get(0); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index 6ad69698c774e..77d63eb20002a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.state; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.close.CloseIndexRequestBuilder; @@ -56,11 +58,13 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.transport.client.Client; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; @@ -70,6 +74,7 @@ import static java.util.Collections.emptySet; import static java.util.stream.Collectors.toList; import static org.opensearch.action.support.IndicesOptions.lenientExpandOpen; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -82,10 +87,23 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class CloseIndexIT extends OpenSearchIntegTestCase { +public class CloseIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final int MAX_DOCS = 25_000; + public CloseIndexIT(Settings nodeSettings) { + super(nodeSettings); + } + + // This is to reuse CloseIndexIT in RemoteCloseIndexIT. + // Concurrent search doesn't make a difference in these tests. + @ParametersFactory + public static Collection parameters() { + return Collections.singletonList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/RemoteCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/RemoteCloseIndexIT.java new file mode 100644 index 0000000000000..3267da06a99bb --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/RemoteCloseIndexIT.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.state; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.node.Node; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.common.util.FeatureFlags.WRITABLE_WARM_INDEX_SETTING; + +public class RemoteCloseIndexIT extends CloseIndexIT { + + public RemoteCloseIndexIT(Settings nodeSettings) { + super(nodeSettings); + } + + protected Path remoteRepoPath; + + protected final static String TEST_REMOTE_STORE_REPO_SUFFIX = "__rs"; + protected static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (remoteRepoPath == null) { + remoteRepoPath = randomRepoPath().toAbsolutePath(); + } + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) + .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .build(); + } + + protected Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); + if (WRITABLE_WARM_INDEX_SETTING.get(settings)) { + settingsBuilder.put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), true); + } + return settingsBuilder; + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(WRITABLE_WARM_INDEX_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(WRITABLE_WARM_INDEX_SETTING.getKey(), false).build() } + ); + } + + void assertNoFileBasedRecovery(String indexName) { + // skipping for remote store + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index 9ae3ae5b7a451..a94d04cf17a06 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -18,6 +18,7 @@ import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -34,9 +35,11 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.utils.FileTypeUtils; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.Node; import org.opensearch.test.InternalTestCluster; import 
org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; import java.util.Arrays; import java.util.HashSet; @@ -45,8 +48,11 @@ import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.core.StringContains.containsString; @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) @@ -247,4 +253,41 @@ public void testFullFileAndFileCacheStats() throws ExecutionException, Interrupt private boolean isFileCacheEmpty(AggregateFileCacheStats stats) { return stats.getUsed().getBytes() == 0L && stats.getActive().getBytes() == 0L; } + + public void testNoResizeOnWarm() { + InternalTestCluster internalTestCluster = internalCluster(); + internalTestCluster.startClusterManagerOnlyNode(); + internalCluster().startDataAndWarmNodes(1).get(0); + Settings idxSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexModule.IS_WARM_INDEX_SETTING.getKey(), true) + .build(); + + createIndex(INDEX_NAME, idxSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ResizeType type = randomFrom(ResizeType.CLONE, ResizeType.SHRINK, ResizeType.SPLIT); + try { + client().admin() + .indices() + .prepareResizeIndex(INDEX_NAME, "target") + .setResizeType(type) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build() + ) + .get(); + fail(); + } catch (Exception e) { + Assert.assertThat(e.getMessage(), containsString("cannot resize warm index")); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index b5d5bddd160ca..5f86fd791821e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.apache.lucene.store.RateLimiter; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; @@ -42,12 +43,18 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.client.Client; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static 
org.opensearch.repositories.blobstore.BlobStoreRepository.MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.MAX_REMOTE_UPLOAD_BYTES_PER_SEC; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -138,10 +145,11 @@ public void testCreatSnapAndUpdateReposityCauseInfiniteLoop() throws Interrupted // create repository final String repositoryName = "test-repo"; + Path path = randomRepoPath(); Settings.Builder repoSettings = Settings.builder() - .put("location", randomRepoPath()) - .put("max_snapshot_bytes_per_sec", "10mb") - .put("max_restore_bytes_per_sec", "10mb"); + .put("location", path) + .put(MAX_SNAPSHOT_BYTES_PER_SEC, "10mb") + .put(MAX_RESTORE_BYTES_PER_SEC, "10mb"); OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( client().admin().cluster(), repositoryName, @@ -176,7 +184,7 @@ public void testCreatSnapAndUpdateReposityCauseInfiniteLoop() throws Interrupted try { logger.info("--> begin to reset repository"); - repoSettings = Settings.builder().put("location", randomRepoPath()).put("max_snapshot_bytes_per_sec", "300mb"); + repoSettings = Settings.builder().put("location", randomRepoPath()).put(MAX_SNAPSHOT_BYTES_PER_SEC, "300mb"); OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( client().admin().cluster(), repositoryName, @@ -194,4 +202,121 @@ public void testCreatSnapAndUpdateReposityCauseInfiniteLoop() throws Interrupted thread.join(); } + + public void testAdjustBytesPerSecSettingForSnapAndRestore() { + final InternalTestCluster cluster = internalCluster(); + final RepositoriesService repositoriesService = cluster.getDataOrClusterManagerNodeInstances(RepositoriesService.class) + .iterator() + .next(); + + // create repository + final String repositoryName = "test-repo1"; + long rateBytes = 200000; + Path path = randomRepoPath(); + Settings.Builder repoSettings = Settings.builder() + .put("location", path) + .put(MAX_SNAPSHOT_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_RESTORE_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC, (rateBytes + "b")); + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings + ); + + FsRepository repository = (FsRepository) repositoriesService.repository(repositoryName); + RateLimiter snapshotRateLimiter = repository.snapshotRateLimiter(); + assertThat(snapshotRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + RateLimiter restoreRateLimiter = repository.restoreRateLimiter(); + assertThat(restoreRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + RateLimiter remoteUploadRateLimiter = repository.remoteUploadRateLimiter(); + assertThat(remoteUploadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + RateLimiter remoteUploadLowPriorityRateLimiter = repository.remoteUploadLowPriorityRateLimiter(); + assertThat(remoteUploadLowPriorityRateLimiter.getMBPerSec(), 
equalTo((double) rateBytes / (1024 * 1024))); + RateLimiter remoteDownloadRateLimiter = repository.remoteDownloadRateLimiter(); + assertThat(remoteDownloadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + + // adjust all the reloadable settings + { + rateBytes = rateBytes / 2; + repoSettings = Settings.builder() + .put(MAX_SNAPSHOT_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_RESTORE_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC, (rateBytes + "b")); + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings + ); + FsRepository newRepository = (FsRepository) repositoriesService.repository(repositoryName); + assertThat(newRepository, sameInstance(repository)); + snapshotRateLimiter = newRepository.snapshotRateLimiter(); + assertThat(snapshotRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + restoreRateLimiter = newRepository.restoreRateLimiter(); + assertThat(restoreRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + remoteUploadRateLimiter = newRepository.remoteUploadRateLimiter(); + assertThat(remoteUploadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + remoteUploadLowPriorityRateLimiter = newRepository.remoteUploadLowPriorityRateLimiter(); + assertThat(remoteUploadLowPriorityRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + remoteDownloadRateLimiter = newRepository.remoteDownloadRateLimiter(); + assertThat(remoteDownloadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + } + + // Apart from the settings in RELOADABLE_SETTINGS, all the new settings are equal to the current settings, so the repository is reloaded in place + { + long newRateBytes = rateBytes / 2; + repoSettings = Settings.builder() + .put("location", path) + .put(MAX_SNAPSHOT_BYTES_PER_SEC, (newRateBytes + "b")) + .put(MAX_RESTORE_BYTES_PER_SEC, (newRateBytes + "b")); + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings + ); + FsRepository newRepository = (FsRepository) repositoriesService.repository(repositoryName); + assertThat(newRepository, sameInstance(repository)); + snapshotRateLimiter = newRepository.snapshotRateLimiter(); + assertThat(snapshotRateLimiter.getMBPerSec(), equalTo((double) newRateBytes / (1024 * 1024))); + restoreRateLimiter = newRepository.restoreRateLimiter(); + assertThat(restoreRateLimiter.getMBPerSec(), equalTo((double) newRateBytes / (1024 * 1024))); + remoteUploadRateLimiter = newRepository.remoteUploadRateLimiter(); + assertThat(remoteUploadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + remoteUploadLowPriorityRateLimiter = newRepository.remoteUploadLowPriorityRateLimiter(); + assertThat(remoteUploadLowPriorityRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + remoteDownloadRateLimiter = newRepository.remoteDownloadRateLimiter(); + assertThat(remoteDownloadRateLimiter.getMBPerSec(), equalTo((double) rateBytes / (1024 * 1024))); + } + + // the new settings are not all equal to the old settings, so the repository will not be reloaded + { + rateBytes = rateBytes / 2; + repoSettings = Settings.builder() + .put("location", path) + .put("io_buffer_size", "8mb") + 
.put(MAX_RESTORE_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC, (rateBytes + "b")) + .put(MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC, (rateBytes + "b")); + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings + ); + FsRepository newRepository = (FsRepository) repositoriesService.repository(repositoryName); + assertNotEquals(newRepository, repository); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 419ea2f74019c..d32bad5f17d2a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -49,6 +49,8 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.RegexpFlag; +import org.opensearch.index.query.RegexpQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortOrder; @@ -59,7 +61,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -763,4 +767,19 @@ private void assertRescoreWindowFails(int windowSize) { ) ); } + + public void testRegexQueryWithComplementFlag() { + createIndex("test_regex"); + client().prepareIndex("test_regex").setId("1").setSource("text", "abc").get(); + client().prepareIndex("test_regex").setId("2").setSource("text", "adc").get(); + client().prepareIndex("test_regex").setId("3").setSource("text", "acc").get(); + refresh(); + RegexpQueryBuilder query = new RegexpQueryBuilder("text.keyword", "a~bc"); + query.flags(RegexpFlag.COMPLEMENT); + SearchResponse response = client().prepareSearch("test_regex").setQuery(query).get(); + assertEquals("COMPLEMENT should match 2 documents", 2L, response.getHits().getTotalHits().value()); + Set matchedIds = Arrays.stream(response.getHits().getHits()).map(hit -> hit.getId()).collect(Collectors.toSet()); + assertEquals("Should match exactly 2 documents", 2, matchedIds.size()); + assertTrue("Should match documents 2 and 3", matchedIds.containsAll(Arrays.asList("2", "3"))); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index e8675ff3a6e55..c1a869a43d8aa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -934,7 +934,7 @@ public void testSortMissingNumbers() throws Exception { indexRandomForConcurrentSearch("test"); // DOUBLE - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) 
.addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)) @@ -944,7 +944,6 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(3L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() @@ -983,7 +982,7 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); // FLOAT - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC)) @@ -993,7 +992,6 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(3L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() @@ -1032,7 +1030,7 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); // UNSIGNED_LONG - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC)) @@ -1042,7 +1040,6 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(3L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() @@ -1138,7 +1135,7 @@ public void testSortMissingNumbersMinMax() throws Exception { indexRandomForConcurrentSearch("test"); // LONG - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("l_value").order(SortOrder.ASC)) @@ -1177,7 +1174,7 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); // FLOAT - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC)) @@ -1187,7 +1184,6 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(3L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() @@ -1214,7 +1210,7 @@ public void testSortMissingNumbersMinMax() throws Exception { 
assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); // UNSIGNED_LONG - logger.info("--> sort with no missing (same as missing _last)"); + logger.info("--> sort with no missing"); searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC)) @@ -1225,7 +1221,6 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); // The order here could be unstable (depends on document order) since missing == field value assertThat(searchResponse.getHits().getAt(1).getId(), is(oneOf("3", "2"))); - assertThat(searchResponse.getHits().getAt(2).getId(), is(oneOf("2", "3"))); logger.info("--> sort with missing _last"); searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index 252efcdc979bb..af17ef596b6a4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -50,6 +50,7 @@ import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.AbstractDisruptionTestCase; import org.opensearch.plugins.Plugin; @@ -154,6 +155,7 @@ public void testSettingsUpdateFailWhenCreateSnapshotInProgress() throws Exceptio Thread.sleep(1000); // Wait for the snapshot to start assertFalse(createSlowFuture.isDone()); // Ensure the snapshot is still in progress // Attempt to update the repository settings while the snapshot is in progress + settings.put("chunk_size", 2000, ByteSizeUnit.BYTES); IllegalStateException ex = assertThrows(IllegalStateException.class, () -> updateRepository(repoName, "mock", settings)); // Verify that the update fails with an appropriate exception assertEquals("trying to modify or unregister repository that is currently used", ex.getMessage()); @@ -180,10 +182,9 @@ public void testSettingsUpdateFailWhenDeleteSnapshotInProgress() throws Interrup Thread.sleep(1000); // Wait for the delete operation to start assertFalse(future.isDone()); // Ensure the delete operation is still in progress // Attempt to update the repository settings while the delete operation is in progress - IllegalStateException ex = assertThrows( - IllegalStateException.class, - () -> updateRepository(repoName, "mock", randomRepositorySettings()) - ); + Settings.Builder newSettings = randomRepositorySettings(); + newSettings.put("chunk_size", 2000, ByteSizeUnit.BYTES); + IllegalStateException ex = assertThrows(IllegalStateException.class, () -> updateRepository(repoName, "mock", newSettings)); // Verify that the update fails with an appropriate exception assertEquals("trying to modify or unregister repository that is currently used", ex.getMessage()); unblockNode(repoName, clusterManagerName); // Unblock the delete operation diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 41b6df062ab42..0927ec234928a 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -53,6 +53,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.DocsStats; @@ -146,6 +147,10 @@ protected void clusterManagerOperation( final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); IndexMetadata indexMetadata = state.metadata().index(sourceIndex); + if (indexMetadata.getSettings().getAsBoolean(IndexModule.IS_WARM_INDEX_SETTING.getKey(), false) == true) { + throw new IllegalStateException("cannot resize warm index"); + } + ClusterSettings clusterSettings = clusterService.getClusterSettings(); if (resizeRequest.getResizeType().equals(ResizeType.SHRINK) && state.metadata().isSegmentReplicationEnabled(sourceIndex) diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index fc7dae7854ff2..517d3abd2a472 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver.ExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataDeleteIndexService; import org.opensearch.cluster.metadata.MetadataIndexAliasesService; @@ -143,6 +144,7 @@ public class ClusterModule extends AbstractModule { final Collection deciderList; final ShardsAllocator shardsAllocator; private final ClusterManagerMetrics clusterManagerMetrics; + private final Class shardStateActionClass; public ClusterModule( Settings settings, @@ -151,14 +153,15 @@ public ClusterModule( ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, ThreadContext threadContext, - ClusterManagerMetrics clusterManagerMetrics + ClusterManagerMetrics clusterManagerMetrics, + Class shardStateActionClass ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins); this.clusterService = clusterService; - this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadContext); + this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadContext, getCustomResolvers(clusterPlugins)); this.allocationService = new AllocationService( allocationDeciders, shardsAllocator, @@ -168,6 +171,7 @@ public ClusterModule( clusterManagerMetrics ); this.clusterManagerMetrics = clusterManagerMetrics; + this.shardStateActionClass = shardStateActionClass; } public static List getNamedWriteables() { @@ -439,6 +443,21 @@ private static 
ShardsAllocator createShardsAllocator( return Objects.requireNonNull(allocatorSupplier.get(), "ShardsAllocator factory for [" + allocatorName + "] returned null"); } + private static List getCustomResolvers(List clusterPlugins) { + Map resolvers = new HashMap<>(); + clusterPlugins.stream().flatMap(c -> c.getIndexNameCustomResolvers().stream()).forEach(r -> addCustomResolver(resolvers, r)); + return Collections.unmodifiableList(new ArrayList<>(resolvers.values())); + } + + private static void addCustomResolver( + Map resolvers, + IndexNameExpressionResolver.ExpressionResolver customResolver + ) { + if (resolvers.put(customResolver.getClass(), customResolver) != null) { + throw new IllegalArgumentException("Cannot specify expression resolver [" + customResolver.getClass().getName() + "] twice"); + } + } + public AllocationService getAllocationService() { return allocationService; } @@ -458,7 +477,11 @@ protected void configure() { bind(MetadataIndexTemplateService.class).asEagerSingleton(); bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); bind(DelayedAllocationService.class).asEagerSingleton(); - bind(ShardStateAction.class).asEagerSingleton(); + if (shardStateActionClass == ShardStateAction.class) { + bind(ShardStateAction.class).asEagerSingleton(); + } else { + bind(ShardStateAction.class).to(shardStateActionClass).asEagerSingleton(); + } bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/LocalShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/LocalShardStateAction.java new file mode 100644 index 0000000000000..335cede540b57 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/action/shard/LocalShardStateAction.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.action.shard; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RerouteService; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.function.Function; + +/** + * A local implementation of {@link ShardStateAction} that applies shard state changes directly to the + * local cluster state. This is used in clusterless mode, where there is no cluster manager. 
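+ * Shard-started events are applied by moving the shard to STARTED directly in the local routing table, and shard + * failures are not reported to any cluster manager.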
+ */ +public class LocalShardStateAction extends ShardStateAction { + @Inject + public LocalShardStateAction( + ClusterService clusterService, + TransportService transportService, + AllocationService allocationService, + RerouteService rerouteService, + ThreadPool threadPool + ) { + super(clusterService, transportService, allocationService, rerouteService, threadPool); + } + + @Override + public void shardStarted( + ShardRouting shardRouting, + long primaryTerm, + String message, + ActionListener listener, + ClusterState currentState + ) { + Function clusterStateUpdater = clusterState -> { + // We're running in clusterless mode. Apply the state change directly to the local cluster state. + RoutingTable routingTable = clusterState.getRoutingTable(); + IndexRoutingTable indexRoutingTable = routingTable.index(shardRouting.index()); + + ClusterState.Builder clusterStateBuilder = ClusterState.builder(clusterState); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(routingTable); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(shardRouting.index()); + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + if (indexShardRoutingTable.shardId().equals(shardRouting.shardId())) { + IndexShardRoutingTable.Builder indexShardRoutingTableBuilder = new IndexShardRoutingTable.Builder( + indexShardRoutingTable + ); + indexShardRoutingTableBuilder.removeShard(shardRouting); + indexShardRoutingTableBuilder.addShard(shardRouting.moveToStarted()); + indexRoutingTableBuilder.addIndexShard(indexShardRoutingTableBuilder.build()); + } else { + indexRoutingTableBuilder.addIndexShard(indexShardRoutingTable); + } + } + routingTableBuilder.add(indexRoutingTableBuilder); + clusterStateBuilder.routingTable(routingTableBuilder.build()); + return clusterStateBuilder.build(); + }; + clusterService.getClusterApplierService() + .updateClusterState("shard-started " + shardRouting.shardId(), clusterStateUpdater, (s, e) -> {}); + } + + @Override + public void localShardFailed( + ShardRouting shardRouting, + String message, + Exception failure, + ActionListener listener, + ClusterState currentState + ) { + // Do not send a failure to the cluster manager, as we are running in clusterless mode. + } +} diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index cb5749a91d448..6a204925ccd04 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -88,7 +88,7 @@ import java.util.function.Supplier; /** - * Transport action for retrieving the shard state + * Registers transport actions that react to shard state changes, such as shard started or shard failed. 
* * @opensearch.internal */ @@ -128,7 +128,7 @@ private static Priority parseReroutePriority(String priorityString) { } private final TransportService transportService; - private final ClusterService clusterService; + final ClusterService clusterService; private final ThreadPool threadPool; private volatile Priority followUpRerouteTaskPriority; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index 24ff83d638d4b..abfabdd10c340 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -88,14 +88,34 @@ public class IndexNameExpressionResolver { private final DateMathExpressionResolver dateMathExpressionResolver = new DateMathExpressionResolver(); private final WildcardExpressionResolver wildcardExpressionResolver = new WildcardExpressionResolver(); - private final List expressionResolvers = List.of(dateMathExpressionResolver, wildcardExpressionResolver); + private final List expressionResolvers; + private final List customResolvers = new ArrayList<>(); private final ThreadContext threadContext; public IndexNameExpressionResolver(ThreadContext threadContext) { + expressionResolvers = new ArrayList<>(); + expressionResolvers.add(dateMathExpressionResolver); + expressionResolvers.add(wildcardExpressionResolver); this.threadContext = Objects.requireNonNull(threadContext, "Thread Context must not be null"); } + public IndexNameExpressionResolver(ThreadContext threadContext, List resolvers) { + // Put custom resolvers at the head of the list to ensure the WildcardExpressionResolver is triggered last, + // otherwise it will throw an exception for keywords it cannot resolve that are expected to be handled + // by custom resolvers. + customResolvers.addAll(resolvers); + expressionResolvers = new ArrayList<>(customResolvers); + expressionResolvers.add(dateMathExpressionResolver); + expressionResolvers.add(wildcardExpressionResolver); + this.threadContext = Objects.requireNonNull(threadContext, "Thread Context must not be null"); + } + + // Visible for testing + public List getExpressionResolvers() { + return new ArrayList<>(this.expressionResolvers); + } + /** * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request. @@ -171,7 +191,14 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, indexExpressions = new String[] { "*" }; } - List dataStreams = wildcardExpressionResolver.resolve(context, Arrays.asList(indexExpressions)); + // Using customResolvers to filter out invalid expressions + List finalExpressions = Arrays.asList(indexExpressions); + for (ExpressionResolver resolver : customResolvers) { + finalExpressions = resolver.resolve(context, finalExpressions); + } + + List dataStreams = wildcardExpressionResolver.resolve(context, finalExpressions); + return ((dataStreams == null) ? 
List.of() : dataStreams).stream() .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) @@ -778,6 +805,7 @@ public boolean isSystemIndexAccessAllowed() { * * @opensearch.internal */ + @PublicApi(since = "3.1.0") public static class Context { private final ClusterState state; @@ -789,7 +817,7 @@ public static class Context { private final boolean preserveDataStreams; private final boolean isSystemIndexAccessAllowed; - Context(ClusterState state, IndicesOptions options, boolean isSystemIndexAccessAllowed) { + public Context(ClusterState state, IndicesOptions options, boolean isSystemIndexAccessAllowed) { this(state, options, System.currentTimeMillis(), isSystemIndexAccessAllowed); } @@ -903,7 +931,13 @@ public boolean isSystemIndexAccessAllowed() { } } - private interface ExpressionResolver { + /** + * Expression resolver for index name expressions. + * + * @opensearch.internal + */ + @PublicApi(since = "3.1.0") + public interface ExpressionResolver { /** * Resolves the list of expressions into other expressions if possible (possible concrete indices and aliases, but diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 3dc86be816b02..ac4135686486e 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.node; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.annotation.PublicApi; @@ -349,11 +348,7 @@ public void validateRole(List roles) { * The version that {@link #REMOTE_CLUSTER_CLIENT_ROLE} is introduced. Nodes before this version do not have that role even * they can connect to remote clusters. */ - public static final Version REMOTE_CLUSTER_CLIENT_ROLE_VERSION = LegacyESVersion.fromString("7.8.0"); - - static SortedSet LEGACY_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, MASTER_ROLE)) - ); + public static final Version REMOTE_CLUSTER_CLIENT_ROLE_VERSION = Version.fromString("7.8.0"); /** * Represents an unknown role. 
This can occur if a newer version adds a role that an older version does not know about, or a newer diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 7ab1a082a4620..fc2a121c90e54 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -382,6 +382,14 @@ public void onNewClusterState( submitStateUpdateTask(source, ClusterStateTaskConfig.build(Priority.HIGH), applyFunction, listener); } + public void updateClusterState( + final String source, + final Function updateFunction, + final ClusterApplyListener listener + ) { + submitStateUpdateTask(source, ClusterStateTaskConfig.build(Priority.HIGH), updateFunction, listener); + } + private void submitStateUpdateTask( final String source, final ClusterStateTaskConfig config, diff --git a/server/src/main/java/org/opensearch/cluster/service/LocalClusterService.java b/server/src/main/java/org/opensearch/cluster/service/LocalClusterService.java new file mode 100644 index 0000000000000..4caf37cacfc42 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/LocalClusterService.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.service; + +import org.opensearch.cluster.ClusterManagerMetrics; +import org.opensearch.cluster.ClusterStateTaskConfig; +import org.opensearch.cluster.ClusterStateTaskExecutor; +import org.opensearch.cluster.ClusterStateTaskListener; +import org.opensearch.cluster.coordination.ClusterStatePublisher; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.Node; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Map; + +/** + * A local implementation of {@link ClusterService} that assumes we have no cluster manager. + * This is used in clusterless mode. 
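+ * Cluster state changes are applied locally through the {@link ClusterApplierService}; submitting cluster manager + * state update tasks is not supported.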
+ */ +public class LocalClusterService extends ClusterService { + private static class DummyClusterManagerService extends ClusterManagerService { + private static final ClusterManagerThrottlingStats EMPTY_THROTTLING_STATS = new ClusterManagerThrottlingStats(); + + public DummyClusterManagerService(Settings settings, ClusterSettings clusterSettings) { + super(settings, clusterSettings, null, null); + } + + @Override + public synchronized void setClusterStatePublisher(ClusterStatePublisher publisher) {} + + @Override + public ClusterManagerThrottlingStats getThrottlingStats() { + return EMPTY_THROTTLING_STATS; + } + } + + public LocalClusterService( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + ClusterManagerMetrics clusterManagerMetrics + ) { + super( + settings, + clusterSettings, + new DummyClusterManagerService(settings, clusterSettings), + new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool, clusterManagerMetrics) + ); + } + + @Override + protected synchronized void doStart() { + getClusterApplierService().start(); + } + + @Override + protected synchronized void doStop() { + getClusterApplierService().stop(); + } + + @Override + protected synchronized void doClose() { + getClusterApplierService().close(); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(ClusterManagerTask task, boolean throttlingEnabled) { + return null; + } + + @Override + public void submitStateUpdateTasks( + final String source, + final Map tasks, + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor + ) { + throw new UnsupportedOperationException("Cannot submit cluster state update tasks when cluster manager service is not available"); + } +} diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 4845ee9ae09c9..47427b08b6207 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -357,63 +357,38 @@ public static TopDocsAndMaxScore readTopDocs(StreamInput in) throws IOException public static FieldDoc readFieldDoc(StreamInput in) throws IOException { Comparable[] cFields = new Comparable[in.readVInt()]; for (int j = 0; j < cFields.length; j++) { - byte type = in.readByte(); - if (type == 0) { - cFields[j] = null; - } else if (type == 1) { - cFields[j] = in.readString(); - } else if (type == 2) { - cFields[j] = in.readInt(); - } else if (type == 3) { - cFields[j] = in.readLong(); - } else if (type == 4) { - cFields[j] = in.readFloat(); - } else if (type == 5) { - cFields[j] = in.readDouble(); - } else if (type == 6) { - cFields[j] = in.readByte(); - } else if (type == 7) { - cFields[j] = in.readShort(); - } else if (type == 8) { - cFields[j] = in.readBoolean(); - } else if (type == 9) { - cFields[j] = in.readBytesRef(); - } else if (type == 10) { - cFields[j] = new BigInteger(in.readString()); - } else { - throw new IOException("Can't match type [" + type + "]"); - } + cFields[j] = readTypedValue(in); } return new FieldDoc(in.readVInt(), in.readFloat(), cFields); } public static Comparable readSortValue(StreamInput in) throws IOException { + return readTypedValue(in); + } + + /** + * Reads a typed value from the stream based on a type byte prefix. 
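+ * The type byte mirrors the value written on the wire (for example, 0 for null, 1 for String, 9 for BytesRef and 10 for BigInteger).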
+ * + * @param in the input stream + * @return the deserialized Comparable value + * @throws IOException if reading fails or type is unknown + */ + private static Comparable readTypedValue(StreamInput in) throws IOException { byte type = in.readByte(); - if (type == 0) { - return null; - } else if (type == 1) { - return in.readString(); - } else if (type == 2) { - return in.readInt(); - } else if (type == 3) { - return in.readLong(); - } else if (type == 4) { - return in.readFloat(); - } else if (type == 5) { - return in.readDouble(); - } else if (type == 6) { - return in.readByte(); - } else if (type == 7) { - return in.readShort(); - } else if (type == 8) { - return in.readBoolean(); - } else if (type == 9) { - return in.readBytesRef(); - } else if (type == 10) { - return new BigInteger(in.readString()); - } else { - throw new IOException("Can't match type [" + type + "]"); - } + return switch (type) { + case 0 -> null; + case 1 -> in.readString(); + case 2 -> in.readInt(); + case 3 -> in.readLong(); + case 4 -> in.readFloat(); + case 5 -> in.readDouble(); + case 6 -> in.readByte(); + case 7 -> in.readShort(); + case 8 -> in.readBoolean(); + case 9 -> in.readBytesRef(); + case 10 -> new BigInteger(in.readString()); + default -> throw new IOException("Can't match type [" + type + "]"); + }; } public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/common/util/TimeBasedExpiryTracker.java b/server/src/main/java/org/opensearch/common/util/TimeBasedExpiryTracker.java new file mode 100644 index 0000000000000..c99d0f5be0fa9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/TimeBasedExpiryTracker.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import java.util.function.BooleanSupplier; +import java.util.function.LongSupplier; + +/** + * This class can be used to track time-based expiration events. + * Clients should be cautious with the expiry time as this class is not completely thread safe. This is intentional, + * as nanosecond-level error in `lastAccessTimeInNanos` is tolerable and can be ignored. 
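+ * A typical (illustrative) use is gating periodic work, e.g. {@code if (tracker.getAsBoolean()) refreshStats(); } where + * {@code refreshStats()} stands for any caller-supplied action: the tracker returns true at most once per expiry window + * and resets the window on that call.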
+ * @opensearch.internal + */ +public class TimeBasedExpiryTracker implements BooleanSupplier { + private final LongSupplier nanoTimeSupplier; + private volatile long lastAccessTimeInNanos; + private final long expiryTimeInNanos; + private static final long ONE_SEC = 1000_000_000; + + public TimeBasedExpiryTracker(LongSupplier nanoTimeSupplier) { + this(nanoTimeSupplier, ONE_SEC); + } + + public TimeBasedExpiryTracker(LongSupplier nanoTimeSupplier, long expiryTimeInNanos) { + this.nanoTimeSupplier = nanoTimeSupplier; + this.lastAccessTimeInNanos = nanoTimeSupplier.getAsLong(); + this.expiryTimeInNanos = expiryTimeInNanos; + } + + @Override + public boolean getAsBoolean() { + final long currentTime = nanoTimeSupplier.getAsLong(); + final boolean isExpired = (currentTime - lastAccessTimeInNanos) > expiryTimeInNanos; + if (isExpired) { + lastAccessTimeInNanos = currentTime; + } + return isExpired; + } +} diff --git a/server/src/main/java/org/opensearch/discovery/LocalDiscovery.java b/server/src/main/java/org/opensearch/discovery/LocalDiscovery.java new file mode 100644 index 0000000000000..d0090f5b4e7f4 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/LocalDiscovery.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.discovery; + +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterApplier; +import org.opensearch.cluster.service.ClusterStateStats; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.action.ActionListener; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Clusterless implementation of Discovery. This is only able to "discover" the local node. + */ +public class LocalDiscovery extends AbstractLifecycleComponent implements Discovery { + private static final DiscoveryStats EMPTY_STATS = new DiscoveryStats( + new PendingClusterStateStats(0, 0, 0), + new PublishClusterStateStats(0, 0, 0), + new ClusterStateStats() + ); + private final TransportService transportService; + private final ClusterApplier clusterApplier; + + public LocalDiscovery(TransportService transportService, ClusterApplier clusterApplier) { + this.transportService = transportService; + this.clusterApplier = clusterApplier; + } + + @Override + public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener publishListener, AckListener ackListener) { + // In clusterless mode, we should never be asked to publish a cluster state. 
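+ // Publication is a cluster manager responsibility; in clusterless mode the only state LocalDiscovery provides is the + // single-node bootstrap state set on the applier in doStart().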
+ throw new UnsupportedOperationException("Should not be called in clusterless mode"); + } + + @Override + protected void doStart() { + DiscoveryNode localNode = transportService.getLocalNode(); + ClusterState bootstrapClusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).build()) + .build(); + clusterApplier.setInitialState(bootstrapClusterState); + } + + @Override + protected void doStop() { + + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public DiscoveryStats stats() { + return EMPTY_STATS; + } + + @Override + public void startInitialJoin() { + + } + + @Override + public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + + } +} diff --git a/server/src/main/java/org/opensearch/index/autoforcemerge/AutoForceMergeManager.java b/server/src/main/java/org/opensearch/index/autoforcemerge/AutoForceMergeManager.java index f79b0e72c683a..ae7f98a138407 100644 --- a/server/src/main/java/org/opensearch/index/autoforcemerge/AutoForceMergeManager.java +++ b/server/src/main/java/org/opensearch/index/autoforcemerge/AutoForceMergeManager.java @@ -66,10 +66,11 @@ public class AutoForceMergeManager extends AbstractLifecycleComponent { private ConfigurationValidator configurationValidator; private NodeValidator nodeValidator; private ShardValidator shardValidator; + private Integer allocatedProcessors; + private ResourceTrackerProvider.ResourceTrackers resourceTrackers; private final ForceMergeManagerSettings forceMergeManagerSettings; private final CommonStatsFlags flags = new CommonStatsFlags(CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Translog); private final Set mergingShards; - private Integer allocatedProcessors; private static final Logger logger = LogManager.getLogger(AutoForceMergeManager.class); @@ -96,6 +97,7 @@ protected void doStart() { this.nodeValidator = new NodeValidator(); this.shardValidator = new ShardValidator(); this.allocatedProcessors = OpenSearchExecutors.allocatedProcessors(clusterService.getSettings()); + this.resourceTrackers = ResourceTrackerProvider.create(threadPool); } @Override @@ -117,43 +119,65 @@ private void modifySchedulerInterval(TimeValue schedulerInterval) { } private void triggerForceMerge() { + if (isValidForForceMerge() == false) { + return; + } + executeForceMergeOnShards(); + } + + private boolean isValidForForceMerge() { if (configurationValidator.hasWarmNodes() == false) { + resourceTrackers.stop(); logger.debug("No warm nodes found. 
Skipping Auto Force merge."); - return; + return false; } if (nodeValidator.validate().isAllowed() == false) { logger.debug("Node capacity constraints are not allowing to trigger auto ForceMerge"); - return; + return false; } - int iteration = nodeValidator.getMaxConcurrentForceMerges(); + return true; + } + + private void executeForceMergeOnShards() { + int remainingIterations = nodeValidator.getMaxConcurrentForceMerges(); for (IndexShard shard : getShardsBasedOnSorting(indicesService)) { - if (iteration == 0) { + if (remainingIterations == 0 || !nodeValidator.validate().isAllowed()) { + if (remainingIterations > 0) { + logger.debug("Node conditions no longer suitable for force merge."); + } break; } - if (nodeValidator.validate().isAllowed() == false) { - logger.debug("Node conditions no longer suitable for force merge."); + remainingIterations--; + executeForceMergeForShard(shard); + if (!waitBetweenShards()) { break; } - iteration--; - CompletableFuture.runAsync(() -> { - try { - mergingShards.add(shard.shardId().getId()); - shard.forceMerge(new ForceMergeRequest().maxNumSegments(forceMergeManagerSettings.getSegmentCount())); - logger.debug("Merging is completed successfully for the shard {}", shard.shardId()); - } catch (Exception e) { - logger.error("Error during force merge for shard {}\nException: {}", shard.shardId(), e); - } finally { - mergingShards.remove(shard.shardId().getId()); - } - }, threadPool.executor(ThreadPool.Names.FORCE_MERGE)); - logger.info("Successfully triggered force merge for shard {}", shard.shardId()); + } + } + + private void executeForceMergeForShard(IndexShard shard) { + CompletableFuture.runAsync(() -> { try { - Thread.sleep(forceMergeManagerSettings.getForcemergeDelay().getMillis()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Timer was interrupted while waiting between shards", e); - break; + mergingShards.add(shard.shardId().getId()); + shard.forceMerge(new ForceMergeRequest().maxNumSegments(forceMergeManagerSettings.getSegmentCount())); + logger.debug("Merging is completed successfully for the shard {}", shard.shardId()); + } catch (Exception e) { + logger.error("Error during force merge for shard {}\nException: {}", shard.shardId(), e); + } finally { + mergingShards.remove(shard.shardId().getId()); } + }, threadPool.executor(ThreadPool.Names.FORCE_MERGE)); + logger.info("Successfully triggered force merge for shard {}", shard.shardId()); + } + + private boolean waitBetweenShards() { + try { + Thread.sleep(forceMergeManagerSettings.getForcemergeDelay().getMillis()); + return true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.error("Timer was interrupted while waiting between shards", e); + return false; } } @@ -264,15 +288,14 @@ protected class NodeValidator implements ValidationStrategy { @Override public ValidationResult validate() { + resourceTrackers.start(); if (isCpuUsageOverThreshold()) { return new ValidationResult(false); } if (isDiskUsageOverThreshold()) { return new ValidationResult(false); } - double jvmUsedPercent = jvmService.stats().getMem().getHeapUsedPercent(); - if (jvmUsedPercent >= forceMergeManagerSettings.getJvmThreshold()) { - logger.debug("JVM memory: {}% breached the threshold: {}", jvmUsedPercent, forceMergeManagerSettings.getJvmThreshold()); + if (isJvmUsageOverThreshold()) { return new ValidationResult(false); } if (areForceMergeThreadsAvailable() == false) { @@ -291,24 +314,34 @@ private boolean areForceMergeThreadsAvailable() { return 
false; } + private boolean isJvmUsageOverThreshold() { + double jvmAverage = resourceTrackers.jvmFiveMinute.getAverage(); + if (jvmAverage >= forceMergeManagerSettings.getJvmThreshold()) { + logger.debug("JVM Average: 5m({}%) breached the threshold: {}", jvmAverage, forceMergeManagerSettings.getJvmThreshold()); + return true; + } + jvmAverage = resourceTrackers.jvmOneMinute.getAverage(); + if (jvmAverage >= forceMergeManagerSettings.getJvmThreshold()) { + logger.debug("JVM Average: 1m({}%) breached the threshold: {}", jvmAverage, forceMergeManagerSettings.getJvmThreshold()); + return true; + } + double jvmUsedPercent = jvmService.stats().getMem().getHeapUsedPercent(); + if (jvmUsedPercent >= forceMergeManagerSettings.getJvmThreshold()) { + logger.debug("JVM memory: {}% breached the threshold: {}", jvmUsedPercent, forceMergeManagerSettings.getJvmThreshold()); + return true; + } + return false; + } + private boolean isCpuUsageOverThreshold() { - double[] loadAverage = osService.stats().getCpu().getLoadAverage(); - double loadAverage5m = (loadAverage[1] / (double) allocatedProcessors) * 100; - if (loadAverage5m >= forceMergeManagerSettings.getCpuThreshold()) { - logger.debug( - "Load Average: 5m({}%) breached the threshold: {}", - loadAverage5m, - forceMergeManagerSettings.getCpuThreshold() - ); + double cpuAverage = resourceTrackers.cpuFiveMinute.getAverage(); + if (cpuAverage >= forceMergeManagerSettings.getCpuThreshold()) { + logger.debug("CPU Average: 5m({}%) breached the threshold: {}", cpuAverage, forceMergeManagerSettings.getCpuThreshold()); return true; } - double loadAverage1m = (loadAverage[0] / (double) allocatedProcessors) * 100; - if (loadAverage1m >= forceMergeManagerSettings.getCpuThreshold()) { - logger.debug( - "Load Average: 1m({}%) breached the threshold: {}", - loadAverage1m, - forceMergeManagerSettings.getCpuThreshold() - ); + cpuAverage = resourceTrackers.cpuOneMinute.getAverage(); + if (cpuAverage >= forceMergeManagerSettings.getCpuThreshold()) { + logger.debug("CPU Average: 1m({}%) breached the threshold: {}", cpuAverage, forceMergeManagerSettings.getCpuThreshold()); return true; } double cpuPercent = osService.stats().getCpu().getPercent(); @@ -445,6 +478,7 @@ protected boolean mustReschedule() { @Override protected void runInternal() { if (configurationValidator.validate().isAllowed() == false) { + resourceTrackers.stop(); return; } triggerForceMerge(); diff --git a/server/src/main/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettings.java b/server/src/main/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettings.java index b1d9ccc77988c..4077cd5768574 100644 --- a/server/src/main/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettings.java +++ b/server/src/main/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettings.java @@ -56,11 +56,11 @@ public class ForceMergeManagerSettings { ); /** - * Setting for wait time between force merge operations (default: 10s). + * Setting for wait time between force merge operations (default: 15s). */ public static final Setting MERGE_DELAY_BETWEEN_SHARDS_FOR_AUTO_FORCE_MERGE = Setting.timeSetting( "node.auto_force_merge.merge_delay", - TimeValue.timeValueSeconds(10), + TimeValue.timeValueSeconds(15), TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(60), Setting.Property.Dynamic, @@ -92,11 +92,11 @@ public class ForceMergeManagerSettings { ); /** - * Setting for cpu threshold. (default: 80) + * Setting for cpu threshold. 
(default: 75) */ public static final Setting CPU_THRESHOLD_PERCENTAGE_FOR_AUTO_FORCE_MERGE = Setting.doubleSetting( "node.auto_force_merge.cpu.threshold", - 80.0, + 75.0, 10, 100, Setting.Property.Dynamic, @@ -104,11 +104,11 @@ public class ForceMergeManagerSettings { ); /** - * Setting for memory threshold. (default: 90) + * Setting for disk threshold. (default: 85) */ public static final Setting DISK_THRESHOLD_PERCENTAGE_FOR_AUTO_FORCE_MERGE = Setting.doubleSetting( "node.auto_force_merge.disk.threshold", - 90.0, + 85.0, 10, 100, Setting.Property.Dynamic, diff --git a/server/src/main/java/org/opensearch/index/autoforcemerge/ResourceTrackerProvider.java b/server/src/main/java/org/opensearch/index/autoforcemerge/ResourceTrackerProvider.java new file mode 100644 index 0000000000000..6db76a6359ea9 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/autoforcemerge/ResourceTrackerProvider.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.autoforcemerge; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.resource.tracker.AverageCpuUsageTracker; +import org.opensearch.node.resource.tracker.AverageMemoryUsageTracker; +import org.opensearch.threadpool.ThreadPool; + +/** + * Provider for creating resource usage trackers used in auto force merge operations. + * + * @opensearch.internal + */ +public class ResourceTrackerProvider { + + public static final TimeValue SHORT_POLL_INTERVAL = TimeValue.timeValueSeconds(6); + public static final TimeValue LONG_POLL_INTERVAL = TimeValue.timeValueSeconds(30); + public static final TimeValue SHORT_AVERAGE_WINDOW = TimeValue.timeValueMinutes(1); + public static final TimeValue LONG_AVERAGE_WINDOW = TimeValue.timeValueMinutes(5); + + public static ResourceTrackers resourceTrackers; + + public static ResourceTrackers create(ThreadPool threadPool) { + return resourceTrackers = new ResourceTrackers( + new AverageCpuUsageTracker(threadPool, SHORT_POLL_INTERVAL, SHORT_AVERAGE_WINDOW), + new AverageCpuUsageTracker(threadPool, LONG_POLL_INTERVAL, LONG_AVERAGE_WINDOW), + new AverageMemoryUsageTracker(threadPool, SHORT_POLL_INTERVAL, SHORT_AVERAGE_WINDOW), + new AverageMemoryUsageTracker(threadPool, LONG_POLL_INTERVAL, LONG_AVERAGE_WINDOW) + ); + } + + /** + * Container for resource usage trackers used in auto force merge operations. + * Provides access to CPU and JVM memory usage trackers with different time windows. + * + * @opensearch.internal + */ + public static class ResourceTrackers { + public final AverageCpuUsageTracker cpuOneMinute; + public final AverageCpuUsageTracker cpuFiveMinute; + public final AverageMemoryUsageTracker jvmOneMinute; + public final AverageMemoryUsageTracker jvmFiveMinute; + + /** + * Creates a new ResourceTrackers instance. 
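To make the role of the paired 1-minute and 5-minute trackers concrete, the following standalone sketch (illustrative only, not part of this patch; it uses plain JDK types instead of the tracker classes above, and the helper name and suppliers are hypothetical) shows the validation pattern applied by NodeValidator: check the long window, then the short window, then the instantaneous reading against the same threshold.

import java.util.function.DoubleSupplier;

// Illustrative sketch of the dual-window threshold check; not part of this patch.
final class DualWindowThresholdCheck {
    // Returns true if the 5-minute average, the 1-minute average, or the current
    // reading breaches the threshold, mirroring isCpuUsageOverThreshold above.
    static boolean overThreshold(DoubleSupplier fiveMinuteAvg, DoubleSupplier oneMinuteAvg, DoubleSupplier current, double threshold) {
        if (fiveMinuteAvg.getAsDouble() >= threshold) {
            return true; // sustained pressure over the long window
        }
        if (oneMinuteAvg.getAsDouble() >= threshold) {
            return true; // recent spike over the short window
        }
        return current.getAsDouble() >= threshold; // fall back to the instantaneous value
    }

    public static void main(String[] args) {
        // CPU threshold of 75 (the new default): 5m avg 60, 1m avg 80, current 70 -> over threshold.
        System.out.println(overThreshold(() -> 60.0, () -> 80.0, () -> 70.0, 75.0)); // prints true
    }
}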
+ * + * @param cpuOneMinute CPU tracker with 1-minute window + * @param cpuFiveMinute CPU tracker with 5-minute window + * @param jvmOneMinute JVM memory tracker with 1-minute window + * @param jvmFiveMinute JVM memory tracker with 5-minute window + */ + ResourceTrackers( + AverageCpuUsageTracker cpuOneMinute, + AverageCpuUsageTracker cpuFiveMinute, + AverageMemoryUsageTracker jvmOneMinute, + AverageMemoryUsageTracker jvmFiveMinute + ) { + this.cpuOneMinute = cpuOneMinute; + this.cpuFiveMinute = cpuFiveMinute; + this.jvmOneMinute = jvmOneMinute; + this.jvmFiveMinute = jvmFiveMinute; + } + + public void start() { + cpuOneMinute.start(); + cpuFiveMinute.start(); + jvmOneMinute.start(); + jvmFiveMinute.start(); + } + + public void stop() { + cpuOneMinute.stop(); + cpuFiveMinute.stop(); + jvmOneMinute.stop(); + jvmFiveMinute.stop(); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index 1568e262fb6b9..dcd681ac2bf0b 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -93,6 +93,13 @@ public CodecService(@Nullable MapperService mapperService, IndexSettings indexSe this.codecs = codecs.immutableMap(); } + /** + * Returns default codec + */ + public final Codec defaultCodec() { + return codecs.get(DEFAULT_CODEC); + } + public Codec codec(String name) { Codec codec = codecs.get(name); if (codec == null) { diff --git a/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java index 33f1a2b547b78..a9c23fd1f6c5b 100644 --- a/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java +++ b/server/src/main/java/org/opensearch/index/engine/LocalMergedSegmentWarmer.java @@ -6,30 +6,6 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.index.engine; import org.apache.lucene.index.IndexWriter; diff --git a/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java b/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java index eda93d22d9c3b..3e118ff6cc020 100644 --- a/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/MergedSegmentWarmerFactory.java @@ -6,30 +6,6 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.index.engine; import org.apache.lucene.index.IndexWriter; diff --git a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java index f3d5f8a4cc93b..9a34e43423dbd 100644 --- a/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java +++ b/server/src/main/java/org/opensearch/index/engine/RemoteStoreMergedSegmentWarmer.java @@ -6,30 +6,6 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.index.engine; import org.apache.lucene.index.IndexWriter; diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 61a5308fc3b3c..6512007c9683e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -102,6 +102,8 @@ public class NumberFieldMapper extends ParametrizedFieldMapper { public static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope); + private static final int APPROX_QUERY_NUMERIC_DIMS = 1; + private static NumberFieldMapper toType(FieldMapper in) { return (NumberFieldMapper) in; } @@ -356,23 +358,50 @@ public Query rangeQuery( } u = HalfFloatPoint.nextDown(u); } - if (isSearchable && hasDocValues) { - Query query = HalfFloatPoint.newRangeQuery(field, l, u); - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( + + Query dvQuery = hasDocValues + ? 
SortedNumericDocValuesField.newSlowRangeQuery( field, HalfFloatPoint.halfFloatToSortableShort(l), HalfFloatPoint.halfFloatToSortableShort(u) + ) + : null; + if (isSearchable) { + Query pointRangeQuery = HalfFloatPoint.newRangeQuery(field, l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery( + field, + HalfFloatPoint.halfFloatToSortableShort(l), + HalfFloatPoint.halfFloatToSortableShort(u), + query + ); + } + } else { + query = pointRangeQuery; + } + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + NumberType.HALF_FLOAT.encodePoint(l), + NumberType.HALF_FLOAT.encodePoint(u), + APPROX_QUERY_NUMERIC_DIMS, + ApproximatePointRangeQuery.HALF_FLOAT_FORMAT + ) ); - return new IndexOrDocValuesQuery(query, dvQuery); } - if (hasDocValues) { - return SortedNumericDocValuesField.newSlowRangeQuery( + if (context.indexSortedOnField(field)) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery( field, HalfFloatPoint.halfFloatToSortableShort(l), - HalfFloatPoint.halfFloatToSortableShort(u) + HalfFloatPoint.halfFloatToSortableShort(u), + dvQuery ); } - return HalfFloatPoint.newRangeQuery(field, l, u); + return dvQuery; } @Override @@ -503,23 +532,52 @@ public Query rangeQuery( u = FloatPoint.nextDown(u); } } - if (isSearchable && hasDocValues) { - Query query = FloatPoint.newRangeQuery(field, l, u); - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( + + Query dvQuery = hasDocValues + ? SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.floatToSortableInt(l), NumericUtils.floatToSortableInt(u) + ) + : null; + + if (isSearchable) { + Query pointRangeQuery = FloatPoint.newRangeQuery(field, l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery( + field, + NumericUtils.floatToSortableInt(l), + NumericUtils.floatToSortableInt(u), + query + ); + } + } else { + query = pointRangeQuery; + } + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + FloatPoint.pack(new float[] { l }).bytes, + FloatPoint.pack(new float[] { u }).bytes, + APPROX_QUERY_NUMERIC_DIMS, + ApproximatePointRangeQuery.FLOAT_FORMAT + ) ); - return new IndexOrDocValuesQuery(query, dvQuery); } - if (hasDocValues) { - return SortedNumericDocValuesField.newSlowRangeQuery( + + if (context.indexSortedOnField(field)) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery( field, NumericUtils.floatToSortableInt(l), - NumericUtils.floatToSortableInt(u) + NumericUtils.floatToSortableInt(u), + dvQuery ); } - return FloatPoint.newRangeQuery(field, l, u); + return dvQuery; } @Override @@ -628,23 +686,49 @@ public Query rangeQuery( QueryShardContext context ) { return doubleRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - if (isSearchable && hasDocValues) { - Query query = DoublePoint.newRangeQuery(field, l, u); - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( + Query dvQuery = hasDocValues + ? 
SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.doubleToSortableLong(l), NumericUtils.doubleToSortableLong(u) + ) + : null; + if (isSearchable) { + Query pointRangeQuery = DoublePoint.newRangeQuery(field, l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery( + field, + NumericUtils.doubleToSortableLong(l), + NumericUtils.doubleToSortableLong(u), + query + ); + } + } else { + query = pointRangeQuery; + } + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + DoublePoint.pack(new double[] { l }).bytes, + DoublePoint.pack(new double[] { u }).bytes, + APPROX_QUERY_NUMERIC_DIMS, + ApproximatePointRangeQuery.DOUBLE_FORMAT + ) ); - return new IndexOrDocValuesQuery(query, dvQuery); } - if (hasDocValues) { - return SortedNumericDocValuesField.newSlowRangeQuery( + if (context.indexSortedOnField(field)) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery( field, NumericUtils.doubleToSortableLong(l), - NumericUtils.doubleToSortableLong(u) + NumericUtils.doubleToSortableLong(u), + dvQuery ); } - return DoublePoint.newRangeQuery(field, l, u); + return dvQuery; }); } @@ -988,23 +1072,33 @@ public Query rangeQuery( --u; } } - if (isSearchable && hasDocValues) { - Query query = IntPoint.newRangeQuery(field, l, u); - Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); - query = new IndexOrDocValuesQuery(query, dvQuery); - if (context.indexSortedOnField(field)) { - query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + Query dvQuery = hasDocValues ? SortedNumericDocValuesField.newSlowRangeQuery(field, l, u) : null; + if (isSearchable) { + Query pointRangeQuery = IntPoint.newRangeQuery(field, l, u); + Query query; + if (dvQuery != null) { + query = new IndexOrDocValuesQuery(pointRangeQuery, dvQuery); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + } else { + query = pointRangeQuery; } - return query; + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + IntPoint.pack(new int[] { l }).bytes, + IntPoint.pack(new int[] { u }).bytes, + APPROX_QUERY_NUMERIC_DIMS, + ApproximatePointRangeQuery.INT_FORMAT + ) + ); } - if (hasDocValues) { - Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); - if (context.indexSortedOnField(field)) { - query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); - } - return query; + if (context.indexSortedOnField(field)) { + dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, dvQuery); } - return IntPoint.newRangeQuery(field, l, u); + return dvQuery; } @Override @@ -1136,11 +1230,10 @@ public Query rangeQuery( field, LongPoint.pack(new long[] { l }).bytes, LongPoint.pack(new long[] { u }).bytes, - new long[] { l }.length, + APPROX_QUERY_NUMERIC_DIMS, ApproximatePointRangeQuery.LONG_FORMAT ) ); - } if (context.indexSortedOnField(field)) { dvQuery = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, dvQuery); @@ -1257,10 +1350,22 @@ public Query rangeQuery( QueryShardContext context ) { return unsignedLongRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - if (isSearchable && hasDocValues) { + if (isSearchable) { Query query = BigIntegerPoint.newRangeQuery(field, l, u); - Query dvQuery = 
SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); - return new IndexOrDocValuesQuery(query, dvQuery); + if (hasDocValues) { + Query dvQuery = SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); + query = new IndexOrDocValuesQuery(query, dvQuery); + } + return new ApproximateScoreQuery( + query, + new ApproximatePointRangeQuery( + field, + NumberType.UNSIGNED_LONG.encodePoint(l), + NumberType.UNSIGNED_LONG.encodePoint(u), + APPROX_QUERY_NUMERIC_DIMS, + ApproximatePointRangeQuery.UNSIGNED_LONG_FORMAT + ) + ); } if (hasDocValues) { return SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); diff --git a/server/src/main/java/org/opensearch/index/mapper/SemanticVersion.java b/server/src/main/java/org/opensearch/index/mapper/SemanticVersion.java new file mode 100644 index 0000000000000..812d65c5494e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/SemanticVersion.java @@ -0,0 +1,257 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import java.util.Arrays; +import java.util.Locale; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Represents a semantic version number (major.minor.patch-preRelease+build). + * This class implements semantic versioning (SemVer) according to the specification at semver.org. + * It provides methods to parse, compare, and manipulate semantic version numbers. + * Primarily used in {@link SemanticVersionFieldMapper} for mapping and sorting purposes. + * + * @see Semantic Versioning 2.0.0 + * @see OpenSearch github issue + */ +public class SemanticVersion implements Comparable { + + // Regex used to check SemVer string. 
Source: https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string + private static final String SEMANTIC_VERSION_REGEX = + "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"; + private final int major; + private final int minor; + private final int patch; + private final String preRelease; + private final String build; + + public SemanticVersion(int major, int minor, int patch, String preRelease, String build) { + if (major < 0 || minor < 0 || patch < 0) { + throw new IllegalArgumentException("Version numbers cannot be negative"); + } + this.major = major; + this.minor = minor; + this.patch = patch; + this.preRelease = preRelease; + this.build = build; + } + + public int getMajor() { + return major; + } + + public int getMinor() { + return minor; + } + + public int getPatch() { + return patch; + } + + public String getPreRelease() { + return preRelease; + } + + public String getBuild() { + return build; + } + + public static SemanticVersion parse(String version) { + if (version == null || version.isEmpty()) { + throw new IllegalArgumentException("Version string cannot be null or empty"); + } + + // Clean up the input string + version = version.trim(); + version = version.replaceAll("\\[|\\]", ""); // Remove square brackets + + // Handle encoded byte format + if (version.matches(".*\\s+.*")) { + version = version.replaceAll("\\s+", "."); + } + + Pattern pattern = Pattern.compile(SEMANTIC_VERSION_REGEX); + + Matcher matcher = pattern.matcher(version); + if (!matcher.matches()) { + throw new IllegalArgumentException("Invalid semantic version format: [" + version + "]"); + } + + try { + return new SemanticVersion( + Integer.parseInt(matcher.group(1)), + Integer.parseInt(matcher.group(2)), + Integer.parseInt(matcher.group(3)), + matcher.group(4), + matcher.group(5) + ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid version numbers in: " + version, e); + } + } + + /** + * Returns a normalized string representation of the semantic version. + * This format ensures proper lexicographical ordering of versions. + * The format is: + * - Major, minor, and patch numbers are padded to 20 digits + * - Pre-release version is appended with a "-" prefix if present + * - Build metadata is appended with a "+" prefix if present + * + * Example: "1.2.3-alpha+build.123" becomes "00000000000000000001.00000000000000000002.00000000000000000003-alpha+build.123" + * + * Note: Build metadata is included for completeness but does not affect version precedence + * as per SemVer specification. 
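As a side note on the padding described above, the short standalone example below (illustrative only, not part of this patch; the class name and pad helper are hypothetical stand-ins for padWithZeros) shows why fixed-width zero padding is needed: plain string comparison mis-orders multi-digit components, while the padded form compares in true version order.

// Illustrative sketch; not part of this patch.
public class PaddedVersionOrderDemo {
    // Same effect as padWithZeros(value, width) in SemanticVersion.
    static String pad(long value, int width) {
        return String.format("%0" + width + "d", value);
    }

    static String normalize(int major, int minor, int patch) {
        return pad(major, 20) + "." + pad(minor, 20) + "." + pad(patch, 20);
    }

    public static void main(String[] args) {
        // Plain strings: "1.10.0" compares before "1.9.0", which is the wrong version order.
        System.out.println("1.10.0".compareTo("1.9.0") < 0); // true (incorrect ordering)
        // Padded form: 1.9.0 correctly compares before 1.10.0.
        System.out.println(normalize(1, 9, 0).compareTo(normalize(1, 10, 0)) < 0); // true (correct ordering)
    }
}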
+ * + * @return normalized string representation of the version + */ + public String getNormalizedString() { + StringBuilder sb = new StringBuilder(); + + // Pad numbers to 20 digits for consistent lexicographical sorting + // This allows for very large version numbers while maintaining proper order + sb.append(padWithZeros(major, 20)).append('.').append(padWithZeros(minor, 20)).append('.').append(padWithZeros(patch, 20)); + + // Add pre-release version if present + // Pre-release versions have lower precedence than the associated normal version + if (preRelease != null) { + sb.append('-').append(preRelease); + } + + // Add build metadata if present + // Note: Build metadata does not affect version precedence + if (build != null) { + sb.append('+').append(build); + } + + return sb.toString(); + } + + /** + * Returns a normalized comparable string representation of the semantic version. + *

+ * The format zero-pads major, minor, and patch versions to 20 digits each,
+ * separated by dots, to ensure correct lexical sorting of numeric components.
+ * <p>
+ * For pre-release versions, the pre-release label is appended with a leading
+ * hyphen (`-`) in lowercase, preserving lexicographical order among pre-release versions.
+ * <p>
+ * For stable releases (no pre-release), a tilde character (`~`) is appended,
+ * which lexically sorts after any pre-release versions to ensure stable releases are ordered last.
+ * <p>
+ * Ordering: 1.0.0-alpha < 1.0.0-beta < 1.0.0
+ * <p>
+ * Examples:
+ * <ul>
+ * <li>1.0.0 → 00000000000000000001.00000000000000000000.00000000000000000000~</li>
+ * <li>1.0.0-alpha → 00000000000000000001.00000000000000000000.00000000000000000000-alpha</li>
+ * <li>1.0.0-beta → 00000000000000000001.00000000000000000000.00000000000000000000-beta</li>
+ * </ul>
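To see the suffix convention from the list above in action, here is a small standalone check (illustrative only, not part of this patch; the class name is hypothetical) that sorting the normalized comparable strings orders pre-releases before the stable release, because '-' (0x2D) sorts before '~' (0x7E).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch; not part of this patch.
public class ComparableSuffixDemo {
    public static void main(String[] args) {
        String base = "00000000000000000001.00000000000000000000.00000000000000000000";
        List<String> normalized = new ArrayList<>(Arrays.asList(base + "~", base + "-beta", base + "-alpha"));
        normalized.sort(String::compareTo);
        // Prints the -alpha entry first, then -beta, then the stable release ending in '~',
        // matching the intended ordering 1.0.0-alpha < 1.0.0-beta < 1.0.0.
        normalized.forEach(System.out::println);
    }
}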
    + * + * @return normalized string for lexicographical comparison of semantic versions + */ + public String getNormalizedComparableString() { + StringBuilder sb = new StringBuilder(); + + // Zero-pad major, minor, patch + sb.append(padWithZeros(major, 20)).append("."); + sb.append(padWithZeros(minor, 20)).append("."); + sb.append(padWithZeros(patch, 20)); + + if (preRelease == null || preRelease.isEmpty()) { + // Stable release: append '~' to sort AFTER any pre-release + sb.append("~"); + } else { + // Pre-release: append '-' plus normalized pre-release string (lowercase, trimmed) + sb.append("-").append(preRelease.trim().toLowerCase(Locale.ROOT)); + } + + return sb.toString(); + } + + @Override + public int compareTo(SemanticVersion other) { + if (other == null) { + return 1; + } + + int majorComparison = Integer.compare(this.major, other.major); + if (majorComparison != 0) return majorComparison; + + int minorComparison = Integer.compare(this.minor, other.minor); + if (minorComparison != 0) return minorComparison; + + int patchComparison = Integer.compare(this.patch, other.patch); + if (patchComparison != 0) return patchComparison; + + // Pre-release versions have lower precedence + if (this.preRelease == null && other.preRelease != null) return 1; + if (this.preRelease != null && other.preRelease == null) return -1; + if (this.preRelease != null && other.preRelease != null) { + return comparePreRelease(this.preRelease, other.preRelease); + } + + return 0; + } + + private int comparePreRelease(String pre1, String pre2) { + String[] parts1 = pre1.split("\\."); + String[] parts2 = pre2.split("\\."); + + int length = Math.min(parts1.length, parts2.length); + for (int i = 0; i < length; i++) { + String part1 = parts1[i]; + String part2 = parts2[i]; + + boolean isNum1 = part1.matches("\\d+"); + boolean isNum2 = part2.matches("\\d+"); + + if (isNum1 && isNum2) { + int num1 = Integer.parseInt(part1); + int num2 = Integer.parseInt(part2); + int comparison = Integer.compare(num1, num2); + if (comparison != 0) return comparison; + } else { + int comparison = part1.compareTo(part2); + if (comparison != 0) return comparison; + } + } + + return Integer.compare(parts1.length, parts2.length); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(patch); + if (preRelease != null) { + sb.append('-').append(preRelease); + } + if (build != null) { + sb.append('+').append(build); + } + return sb.toString(); + } + + private static String padWithZeros(long value, int width) { + String str = Long.toString(value); + int padding = width - str.length(); + if (padding > 0) { + char[] zeros = new char[padding]; + Arrays.fill(zeros, '0'); + return new String(zeros) + str; + } + return str; + } + +} diff --git a/server/src/main/java/org/opensearch/index/mapper/SemanticVersionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SemanticVersionFieldMapper.java new file mode 100644 index 0000000000000..3e3222ff5df75 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/SemanticVersionFieldMapper.java @@ -0,0 +1,395 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +/** + * A field mapper for a semantic version field + * + */ +public class SemanticVersionFieldMapper extends ParametrizedFieldMapper { + public static final String CONTENT_TYPE = "version"; + public static final FieldType FIELD_TYPE = new FieldType(); + public static final String NORMALIZED_FIELD_SUFFIX = "._normalized"; + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.setDocValuesType(DocValuesType.SORTED_SET); + FIELD_TYPE.freeze(); + } + + private final Map meta; + + protected SemanticVersionFieldMapper( + String simpleName, + FieldType fieldType, + MappedFieldType mappedFieldType, + MultiFields multiFields, + CopyTo copyTo, + Map meta + ) { + super(simpleName, mappedFieldType, multiFields, copyTo); + this.meta = meta; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + } + + /** + * Builder class for constructing the SemanticVersionFieldMapper. 
+ */ + public static class Builder extends ParametrizedFieldMapper.Builder { + private final Parameter> meta = Parameter.metaParam(); + private final Parameter indexed = Parameter.indexParam(m -> toType(m).isSearchable, true).alwaysSerialize(); + private final Parameter hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true); + private final Parameter stored = Parameter.storeParam(m -> toType(m).isStored, false); + + private static SemanticVersionFieldType toType(FieldMapper m) { + return (SemanticVersionFieldType) ((ParametrizedFieldMapper) m).mappedFieldType; + } + + public Builder(String name) { + super(name); + } + + @Override + protected List> getParameters() { + List> parameters = new ArrayList<>(); + parameters.add(indexed); + parameters.add(hasDocValues); + parameters.add(stored); + parameters.add(meta); + return parameters; + } + + @Override + public SemanticVersionFieldMapper build(BuilderContext context) { + FieldType fieldType = new FieldType(); + fieldType.setTokenized(false); + fieldType.setStored(stored.getValue()); + fieldType.setIndexOptions(indexed.getValue() ? IndexOptions.DOCS : IndexOptions.NONE); + fieldType.setDocValuesType(hasDocValues.getValue() ? DocValuesType.SORTED_SET : DocValuesType.NONE); + fieldType.freeze(); + return new SemanticVersionFieldMapper( + name, + fieldType, + new SemanticVersionFieldType( + buildFullName(context), + meta.getValue(), + indexed.getValue(), + hasDocValues.getValue(), + stored.getValue() + ), + multiFieldsBuilder.build(this, context), + copyTo.build(), + meta.getValue() + ); + } + } + + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); + + /** + * The specific field type for SemanticVersionFieldMapper + * + * @opensearch.internal + */ + public static class SemanticVersionFieldType extends TermBasedFieldType { + private final Map meta; + private final String normalizedFieldName; + private final boolean isSearchable; + private final boolean hasDocValues; + private final boolean isStored; + + public SemanticVersionFieldType( + String name, + Map meta, + boolean isSearchable, + boolean hasDocValues, + boolean isStored + ) { + super(name, isSearchable, isStored, hasDocValues, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); + this.meta = meta; + this.normalizedFieldName = name + NORMALIZED_FIELD_SUFFIX; + this.isSearchable = isSearchable; + this.hasDocValues = hasDocValues; + this.isStored = isStored; + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + if (value == null) { + throw new IllegalArgumentException("Cannot search for null value"); + } + BytesRef bytes = value instanceof BytesRef ? (BytesRef) value : new BytesRef(value.toString()); + Query indexQuery = isSearchable ? new TermQuery(new Term(name(), bytes)) : null; + Query dvQuery = hasDocValues ? 
SortedSetDocValuesField.newSlowExactQuery(normalizedFieldName, bytes) : null; + if (indexQuery != null && dvQuery != null) { + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } else if (indexQuery != null) { + return indexQuery; + } else if (dvQuery != null) { + return dvQuery; + } else { + throw new IllegalArgumentException("Field [" + name() + "] is neither indexed nor has doc_values enabled"); + } + } + + @Override + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + try { + BytesRef lower = null; + BytesRef upper = null; + if (lowerTerm != null) { + String lowerStr = (lowerTerm instanceof BytesRef) ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(); + SemanticVersion lowerVersion = SemanticVersion.parse(lowerStr); + lower = new BytesRef(lowerVersion.getNormalizedComparableString()); + } + if (upperTerm != null) { + String upperStr = (upperTerm instanceof BytesRef) ? ((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(); + SemanticVersion upperVersion = SemanticVersion.parse(upperStr); + upper = new BytesRef(upperVersion.getNormalizedComparableString()); + } + Query indexQuery = isSearchable ? new TermRangeQuery(normalizedFieldName, lower, upper, includeLower, includeUpper) : null; + Query dvQuery = hasDocValues + ? SortedSetDocValuesField.newSlowRangeQuery(normalizedFieldName, lower, upper, includeLower, includeUpper) + : null; + if (indexQuery != null && dvQuery != null) { + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } else if (indexQuery != null) { + return indexQuery; + } else if (dvQuery != null) { + return dvQuery; + } else { + throw new IllegalArgumentException("Field [" + name() + "] is neither indexed nor has doc_values enabled"); + } + } catch (Exception e) { + throw new QueryShardException( + context, + "Failed to create range query for field [" + + name() + + "]. Lower term: [" + + (lowerTerm != null ? lowerTerm.toString() : "null") + + "], Upper term: [" + + (upperTerm != null ? upperTerm.toString() : "null") + + "]" + ); + } + } + + @Override + public Query termsQuery(List values, QueryShardContext context) { + List bytesList = new ArrayList<>(); + for (Object value : values) { + bytesList.add(value instanceof BytesRef ? (BytesRef) value : new BytesRef(value.toString())); + } + Query indexQuery = isSearchable ? new org.apache.lucene.search.TermInSetQuery(name(), bytesList) : null; + Query dvQuery = hasDocValues ? 
SortedSetDocValuesField.newSlowSetQuery(normalizedFieldName, bytesList) : null; + if (indexQuery != null && dvQuery != null) { + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } else if (indexQuery != null) { + return indexQuery; + } else if (dvQuery != null) { + return dvQuery; + } else { + throw new IllegalArgumentException("Field [" + name() + "] is neither indexed nor has doc_values enabled"); + } + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + if (isSearchable) { + return new RegexpQuery( + new Term(name(), indexedValueForSearch(value)), + syntaxFlags, + matchFlags, + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + method + ); + } else { + throw new IllegalArgumentException("Regexp queries require the field to be indexed"); + } + } + + @Override + public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { + if (caseInsensitive) { + value = value.toLowerCase(Locale.ROOT); + } + if (isSearchable) { + return new WildcardQuery(new Term(name(), indexedValueForSearch(value)), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); + } else { + throw new IllegalArgumentException("Wildcard queries require the field to be indexed"); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + if (isSearchable) { + return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), method); + } else { + throw new IllegalArgumentException("Prefix queries require the field to be indexed"); + } + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + if (isSearchable) { + return new FuzzyQuery( + new Term(name(), indexedValueForSearch(value)), + fuzziness.asDistance(), + prefixLength, + maxExpansions, + transpositions, + method + ); + } else { + throw new IllegalArgumentException("Fuzzy queries require the field to be indexed"); + } + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return new SourceValueFetcher(name(), context, format) { + @Override + protected String parseSourceValue(Object value) { + return value.toString(); + } + }; + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + if (!hasDocValues) { + throw new IllegalArgumentException("Field [" + name() + "] does not have doc_values enabled"); + } + return new SortedSetOrdinalsIndexFieldData.Builder(normalizedFieldName, CoreValuesSourceType.BYTES); + } + + @Override + public Map meta() { + return meta; + } + } + + @Override + protected void parseCreateField(ParseContext context) throws IOException { + String value = context.parser().textOrNull(); + if (value == null) { + return; + } + + SemanticVersion version = 
SemanticVersion.parse(value); + String versionString = version.toString(); + String normalizedValue = version.getNormalizedComparableString(); + BytesRef bytes = new BytesRef(versionString); + BytesRef normalizedValueBytes = new BytesRef(normalizedValue); + + // For retrieval: store original version string + if (fieldType().isStored()) { + context.doc().add(new StoredField(fieldType().name(), versionString)); + } + + // For searching (term queries): use original version string + if (fieldType().isSearchable()) { + context.doc().add(new KeywordField(fieldType().name(), bytes, this.fieldType.stored() ? Field.Store.YES : Field.Store.NO)); + } + + // For range queries and sorting: use normalized form + if (fieldType().hasDocValues() || fieldType().isSearchable()) { + context.doc().add(new KeywordField(fieldType().name() + NORMALIZED_FIELD_SUFFIX, normalizedValueBytes, Field.Store.NO)); + } + if (fieldType().hasDocValues()) { + context.doc().add(new SortedSetDocValuesField(fieldType().name() + NORMALIZED_FIELD_SUFFIX, normalizedValueBytes)); + } + } + + @Override + public ParametrizedFieldMapper.Builder getMergeBuilder() { + Builder builder = new Builder(name()); + builder.init(this); + return builder; + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } +} diff --git a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java index f0da4d5736c0f..692ce355d003e 100644 --- a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; @@ -60,6 +61,9 @@ * @opensearch.internal */ public class RegexpQueryBuilder extends AbstractQueryBuilder implements MultiTermQueryBuilder { + + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RegexpQueryBuilder.class); + public static final String NAME = "regexp"; public static final int DEFAULT_FLAGS_VALUE = RegexpFlag.ALL.value(); @@ -294,12 +298,25 @@ protected Query doToQuery(QueryShardContext context) throws QueryShardException, + "] index level setting." ); } + + // Check if COMPLEMENT flag is being used + // The COMPLEMENT flag maps to Lucene's DEPRECATED_COMPLEMENT which is marked for removal in Lucene 11 + // This deprecation warning helps users migrate their queries before the feature is completely removed + if ((syntaxFlagsValue & RegexpFlag.COMPLEMENT.value()) != 0) { + deprecationLogger.deprecate( + "regexp_complement_operator", + "The complement operator (~) for arbitrary patterns in regexp queries is deprecated and will be removed in a future version. " + + "Consider rewriting your query to use character class negation [^...] or other query types." + ); + } + MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewrite, null, LoggingDeprecationHandler.INSTANCE); int matchFlagsValue = caseInsensitive ? 
RegExp.ASCII_CASE_INSENSITIVE : 0; Query query = null; // For BWC we mask irrelevant bits (RegExp changed ALL from 0xffff to 0xff) - int sanitisedSyntaxFlag = syntaxFlagsValue & RegExp.ALL; + // The hexadecimal for DEPRECATED_COMPLEMENT is 0x10000. The OR condition ensures COMPLEMENT ~ is preserved + int sanitisedSyntaxFlag = syntaxFlagsValue & (RegExp.ALL | RegExp.DEPRECATED_COMPLEMENT); MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType != null) { diff --git a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index e67d9c7e08908..7bfb3475d6744 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -56,6 +56,7 @@ import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.Fuzziness; @@ -787,8 +788,12 @@ private Query getRegexpQuerySingle(String field, String termStr) throws ParseExc if (currentFieldType == null) { return newUnmappedFieldQuery(field); } - setAnalyzer(getSearchAnalyzer(currentFieldType)); - return super.getRegexpQuery(field, termStr); + if (forceAnalyzer != null) { + setAnalyzer(forceAnalyzer); + } + // query string query normalizes search value + termStr = getAnalyzer().normalize(currentFieldType.name(), termStr).utf8ToString(); + return currentFieldType.regexpQuery(termStr, RegExp.ALL, 0, getDeterminizeWorkLimit(), getMultiTermRewriteMethod(), context); } catch (RuntimeException e) { if (lenient) { return newLenientFieldQuery(field, e); diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 8ace4848806d7..f3ee23a7505d2 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -9,9 +9,7 @@ package org.opensearch.index.shard; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; @@ -19,7 +17,6 @@ import org.apache.lucene.store.IndexInput; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.bulk.BackoffPolicy; -import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.logging.Loggers; @@ -30,7 +27,6 @@ import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.index.store.CompositeDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.Translog; @@ -49,6 +45,7 @@ import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.stream.Collectors; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; @@ -93,6 +90,7 @@ public final class RemoteStoreRefreshListener extends ReleasableRetryableRefresh private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; private final RemoteStoreSettings remoteStoreSettings; + private final RemoteStoreUploader remoteStoreUploader; public RemoteStoreRefreshListener( IndexShard indexShard, @@ -106,6 +104,7 @@ public RemoteStoreRefreshListener( this.storeDirectory = indexShard.store().directory(); this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) .getDelegate()).getDelegate(); + remoteStoreUploader = new RemoteStoreUploaderService(indexShard, storeDirectory, remoteDirectory); localSegmentChecksumMap = new HashMap<>(); RemoteSegmentMetadata remoteSegmentMetadata = null; if (indexShard.routingEntry().primary()) { @@ -324,6 +323,32 @@ public void onFailure(Exception e) { return successful.get(); } + /** + * Uploads new segment files to the remote store. + * + * @param localSegmentsPostRefresh collection of segment files present after refresh + * @param localSegmentsSizeMap map of segment file names to their sizes + * @param segmentUploadsCompletedListener listener to be notified when upload completes + */ + private void uploadNewSegments( + Collection localSegmentsPostRefresh, + Map localSegmentsSizeMap, + ActionListener segmentUploadsCompletedListener + ) { + Collection filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); + Function, UploadListener> uploadListenerFunction = (Map sizeMap) -> createUploadListener( + localSegmentsSizeMap + ); + + remoteStoreUploader.uploadSegments( + filteredFiles, + localSegmentsSizeMap, + segmentUploadsCompletedListener, + uploadListenerFunction, + isLowPriorityUpload() + ); + } + /** * Clears the stale files from the latest local segment checksum map. 
* @@ -424,45 +449,6 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se } } - private void uploadNewSegments( - Collection localSegmentsPostRefresh, - Map localSegmentsSizeMap, - ActionListener listener - ) { - Collection filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); - if (filteredFiles.size() == 0) { - logger.debug("No new segments to upload in uploadNewSegments"); - listener.onResponse(null); - return; - } - - logger.debug("Effective new segments files to upload {}", filteredFiles); - ActionListener> mappedListener = ActionListener.map(listener, resp -> null); - GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); - Directory directory = ((FilterDirectory) (((FilterDirectory) storeDirectory).getDelegate())).getDelegate(); - - for (String src : filteredFiles) { - // Initializing listener here to ensure that the stats increment operations are thread-safe - UploadListener statsListener = createUploadListener(localSegmentsSizeMap); - ActionListener aggregatedListener = ActionListener.wrap(resp -> { - statsListener.onSuccess(src); - batchUploadListener.onResponse(resp); - if (directory instanceof CompositeDirectory) { - ((CompositeDirectory) directory).afterSyncToRemote(src); - } - }, ex -> { - logger.warn(() -> new ParameterizedMessage("Exception: [{}] while uploading segment files", ex), ex); - if (ex instanceof CorruptIndexException) { - indexShard.failShard(ex.getMessage(), ex); - } - statsListener.onFailure(src); - batchUploadListener.onFailure(ex); - }); - statsListener.beforeUpload(src); - remoteDirectory.copyFrom(storeDirectory, src, IOContext.DEFAULT, aggregatedListener, isLowPriorityUpload()); - } - } - boolean isLowPriorityUpload() { return isLocalOrSnapshotRecoveryOrSeeding(); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploader.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploader.java new file mode 100644 index 0000000000000..1a60aabf3e609 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploader.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.opensearch.common.util.UploadListener; +import org.opensearch.core.action.ActionListener; + +import java.util.Collection; +import java.util.Map; +import java.util.function.Function; + +/** + * Interface to handle the functionality for uploading data in the remote store + */ +public interface RemoteStoreUploader { + + void uploadSegments( + Collection localSegments, + Map localSegmentsSizeMap, + ActionListener listener, + Function, UploadListener> uploadListenerFunction, + boolean isLowPriorityUpload + ); +} diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploaderService.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploaderService.java new file mode 100644 index 0000000000000..7cab0b258f107 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreUploaderService.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.UploadListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.store.CompositeDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; + +import java.util.Collection; +import java.util.Map; +import java.util.function.Function; + +/** + * The service essentially acts as a bridge between local segment storage and remote storage, + * ensuring efficient and reliable segment synchronization while providing comprehensive monitoring and error handling. + */ +public class RemoteStoreUploaderService implements RemoteStoreUploader { + + private final Logger logger; + + private final IndexShard indexShard; + private final Directory storeDirectory; + private final RemoteSegmentStoreDirectory remoteDirectory; + + public RemoteStoreUploaderService(IndexShard indexShard, Directory storeDirectory, RemoteSegmentStoreDirectory remoteDirectory) { + logger = Loggers.getLogger(getClass(), indexShard.shardId()); + this.indexShard = indexShard; + this.storeDirectory = storeDirectory; + this.remoteDirectory = remoteDirectory; + } + + @Override + public void uploadSegments( + Collection localSegments, + Map localSegmentsSizeMap, + ActionListener listener, + Function, UploadListener> uploadListenerFunction, + boolean isLowPriorityUpload + ) { + if (localSegments.isEmpty()) { + logger.debug("No new segments to upload in uploadNewSegments"); + listener.onResponse(null); + return; + } + + logger.debug("Effective new segments files to upload {}", localSegments); + ActionListener> mappedListener = ActionListener.map(listener, resp -> null); + GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, localSegments.size()); + Directory directory = ((FilterDirectory) (((FilterDirectory) storeDirectory).getDelegate())).getDelegate(); + + for (String localSegment : localSegments) { + // Initializing listener here to ensure that the stats increment operations are thread-safe + UploadListener statsListener = uploadListenerFunction.apply(localSegmentsSizeMap); + ActionListener aggregatedListener = ActionListener.wrap(resp -> { + statsListener.onSuccess(localSegment); + batchUploadListener.onResponse(resp); + // Once uploaded to Remote, local files become eligible for eviction from FileCache + if (directory instanceof CompositeDirectory) { + ((CompositeDirectory) directory).afterSyncToRemote(localSegment); + } + }, ex -> { + logger.warn(() -> new ParameterizedMessage("Exception: [{}] while uploading segment files", ex), ex); + if (ex instanceof CorruptIndexException) { + indexShard.failShard(ex.getMessage(), ex); + } + statsListener.onFailure(localSegment); + batchUploadListener.onFailure(ex); + }); + statsListener.beforeUpload(localSegment); + // Place where the actual upload is happening + remoteDirectory.copyFrom(storeDirectory, localSegment, IOContext.DEFAULT, aggregatedListener, isLowPriorityUpload); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java b/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java 
index 99eb1db04b296..99e70dd215b5b 100644 --- a/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/CompositeDirectory.java @@ -249,10 +249,16 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti public void sync(Collection names) throws IOException { ensureOpen(); logger.trace("Composite Directory[{}]: sync() called {}", this::toString, () -> names); - Collection remoteFiles = Arrays.asList(getRemoteFiles()); - Collection filesToSync = names.stream().filter(name -> remoteFiles.contains(name) == false).collect(Collectors.toList()); - logger.trace("Composite Directory[{}]: Synced files : {}", this::toString, () -> filesToSync); - localDirectory.sync(filesToSync); + Set remoteFiles = Set.of(getRemoteFiles()); + Set localFilesHavingBlocks = Arrays.stream(listLocalFiles()) + .filter(FileTypeUtils::isBlockFile) + .map(file -> file.substring(0, file.indexOf(BLOCK_FILE_IDENTIFIER))) + .collect(Collectors.toSet()); + Collection fullFilesToSync = names.stream() + .filter(name -> (remoteFiles.contains(name) == false) && (localFilesHavingBlocks.contains(name) == false)) + .collect(Collectors.toList()); + logger.trace("Composite Directory[{}]: Synced files : {}", this::toString, () -> fullFilesToSync); + localDirectory.sync(fullFilesToSync); } /** diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 718f79a74f9d7..b8b1e3c650b1b 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -69,8 +69,8 @@ public long capacity() { @Override public CachedIndexInput put(Path filePath, CachedIndexInput indexInput) { + checkParentBreaker(); CachedIndexInput cachedIndexInput = theCache.put(filePath, indexInput); - checkParentBreaker(filePath); return cachedIndexInput; } @@ -79,8 +79,8 @@ public CachedIndexInput compute( Path key, BiFunction remappingFunction ) { + checkParentBreaker(); CachedIndexInput cachedIndexInput = theCache.compute(key, remappingFunction); - checkParentBreaker(key); return cachedIndexInput; } @@ -201,13 +201,11 @@ public void closeIndexInputReferences() { /** * Ensures that the PARENT breaker is not tripped when an entry is added to the cache - * @param filePath the path key for which entry is added */ - private void checkParentBreaker(Path filePath) { + private void checkParentBreaker() { try { circuitBreaker.addEstimateBytesAndMaybeBreak(0, "filecache_entry"); } catch (CircuitBreakingException ex) { - theCache.remove(filePath); throw new CircuitBreakingException( "Unable to create file cache entries", ex.getBytesWanted(), diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInput.java index 9383c53d6d830..deb8f437bfd63 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInput.java @@ -13,23 +13,23 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import java.io.IOException; +import 
java.lang.ref.Cleaner; import java.nio.file.Path; -import java.util.HashSet; -import java.util.Set; /** * Extension of {@link FileCachedIndexInput} for full files for handling clones and slices - * We maintain a clone map so that we can close them when the parent IndexInput is closed so that ref count is properly maintained in file cache - * Closing of clones explicitly is needed as Lucene does not guarantee that it will close the clones + * Since Lucene does not guarantee that it will close the clones/slices, we have created a Cleaner which handles closing of the clones/slices when they become phantom reachable * https://github.com/apache/lucene/blob/8340b01c3cc229f33584ce2178b07b8984daa6a9/lucene/core/src/java/org/apache/lucene/store/IndexInput.java#L32-L33 * @opensearch.experimental */ @ExperimentalApi public class FullFileCachedIndexInput extends FileCachedIndexInput { private static final Logger logger = LogManager.getLogger(FullFileCachedIndexInput.class); - private final Set clones; + private final IndexInputHolder indexInputHolder; + private static final Cleaner CLEANER = Cleaner.create(OpenSearchExecutors.daemonThreadFactory("index-input-cleaner")); public FullFileCachedIndexInput(FileCache cache, Path filePath, IndexInput underlyingIndexInput) { this(cache, filePath, underlyingIndexInput, false); @@ -37,7 +37,8 @@ public FullFileCachedIndexInput(FileCache cache, Path filePath, IndexInput under public FullFileCachedIndexInput(FileCache cache, Path filePath, IndexInput underlyingIndexInput, boolean isClone) { super(cache, filePath, underlyingIndexInput, isClone); - clones = new HashSet<>(); + indexInputHolder = new IndexInputHolder(underlyingIndexInput, isClone, cache, filePath); + CLEANER.register(this, indexInputHolder); } /** @@ -48,7 +49,6 @@ public FullFileCachedIndexInput(FileCache cache, Path filePath, IndexInput under public FullFileCachedIndexInput clone() { FullFileCachedIndexInput clonedIndexInput = new FullFileCachedIndexInput(cache, filePath, luceneIndexInput.clone(), true); cache.incRef(filePath); - clones.add(clonedIndexInput); return clonedIndexInput; } @@ -74,7 +74,6 @@ public IndexInput slice(String sliceDescription, long offset, long length) throw } IndexInput slicedLuceneIndexInput = luceneIndexInput.slice(sliceDescription, offset, length); FullFileCachedIndexInput slicedIndexInput = new FullFileCachedIndexInput(cache, filePath, slicedLuceneIndexInput, true); - clones.add(slicedIndexInput); cache.incRef(filePath); return slicedIndexInput; } @@ -88,21 +87,37 @@ public void close() throws IOException { if (isClone) { cache.decRef(filePath); } - clones.forEach(indexInput -> { - try { - indexInput.close(); - } catch (Exception e) { - logger.trace("Exception while closing clone - {}", e.getMessage()); - } - }); try { luceneIndexInput.close(); } catch (AlreadyClosedException e) { logger.trace("FullFileCachedIndexInput already closed"); } luceneIndexInput = null; - clones.clear(); closed = true; } } + + private static class IndexInputHolder implements Runnable { + private final IndexInput indexInput; + private final FileCache cache; + private final boolean isClone; + private final Path path; + + IndexInputHolder(IndexInput indexInput, boolean isClone, FileCache cache, Path path) { + this.indexInput = indexInput; + this.isClone = isClone; + this.cache = cache; + this.path = path; + } + + @Override + public void run() { + try { + indexInput.close(); + if (isClone) cache.decRef(path); + } catch (IOException e) { + logger.error("Failed to close IndexInput while clearing 
phantom reachable object"); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index a0ac50709bf7e..81438b978cd99 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -40,7 +40,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; @@ -192,7 +191,8 @@ private void uploadBlob( listener.onFailure(new FileTransferException(fileSnapshot, ex)); }); - Objects.requireNonNull(fileSnapshot.getChecksum()); + // Only the first generation doesn't have checksum + assert (fileSnapshot.getChecksum() != null || fileSnapshot.getName().contains("-1.")); uploadBlobAsyncInternal( fileSnapshot.getName(), fileSnapshot.getName(), @@ -226,7 +226,7 @@ void uploadBlobAsyncInternal( BlobPath blobPath, WritePriority writePriority, RemoteTransferContainer.OffsetRangeInputStreamSupplier inputStreamSupplier, - long expectedChecksum, + Long expectedChecksum, ActionListener completionListener, Map metadata ) throws IOException { diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index ad2eb834b721e..a0386eaf57c24 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -68,6 +68,7 @@ import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.RoutingFieldMapper; +import org.opensearch.index.mapper.SemanticVersionFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.StarTreeMapper; @@ -177,6 +178,7 @@ public static Map getMappers(List mappe mappers.put(DerivedFieldMapper.CONTENT_TYPE, DerivedFieldMapper.PARSER); mappers.put(WildcardFieldMapper.CONTENT_TYPE, WildcardFieldMapper.PARSER); mappers.put(StarTreeMapper.CONTENT_TYPE, new StarTreeMapper.TypeParser()); + mappers.put(SemanticVersionFieldMapper.CONTENT_TYPE, SemanticVersionFieldMapper.PARSER); for (MapperPlugin mapperPlugin : mapperPlugins) { for (Map.Entry entry : mapperPlugin.getMappers().entrySet()) { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index f47b082de3856..1c386c1c1623e 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -224,8 +224,7 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener { indexShard.refresh("remote store migration"); }); indexShard.waitForRemoteStoreSync(this::setLastAccessTime); logger.info("Remote Store is now seeded for {}", indexShard.shardId()); diff --git a/server/src/main/java/org/opensearch/indices/replication/MergedSegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/MergedSegmentReplicationTarget.java index 75cd3985ffc88..00ced0cfe4798 100644 --- 
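The FullFileCachedIndexInput change above drops the explicit clone set and instead registers a holder object with a java.lang.ref.Cleaner (created with a named daemon thread factory), so clones and slices that Lucene never closes are released once they become phantom reachable. A minimal, self-contained sketch of that idiom, assuming invented names (TrackedResource, CleanupAction) rather than the OpenSearch classes:

import java.io.Closeable;
import java.io.IOException;
import java.lang.ref.Cleaner;

/** Illustrative owner of a resource that must be released even if close() is never called. */
final class TrackedResource implements AutoCloseable {

    private static final Cleaner CLEANER = Cleaner.create();

    /**
     * The cleanup action holds only what it needs to release and, crucially, no
     * reference back to the owning TrackedResource, otherwise the owner could
     * never become phantom reachable.
     */
    private record CleanupAction(Closeable underlying) implements Runnable {
        @Override
        public void run() {
            try {
                underlying.close();
            } catch (IOException e) {
                // best effort: this runs on the cleaner's daemon thread
            }
        }
    }

    private final Cleaner.Cleanable cleanable;

    TrackedResource(Closeable underlying) {
        this.cleanable = CLEANER.register(this, new CleanupAction(underlying));
    }

    @Override
    public void close() {
        // Explicit close runs the action at most once; a later cleaner pass then
        // finds nothing left to do.
        cleanable.clean();
    }
}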
a/server/src/main/java/org/opensearch/indices/replication/MergedSegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/MergedSegmentReplicationTarget.java @@ -6,30 +6,6 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.indices.replication; import org.opensearch.action.StepListener; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 1be0224a77b60..d39b5b62781b8 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -49,6 +49,7 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; @@ -347,7 +348,10 @@ public void onReplicationDone(SegmentReplicationState state) { // if we received a checkpoint during the copy event that is ahead of this // try and process it. 
- processLatestReceivedCheckpoint(replicaShard, thread); + ReplicationCheckpoint latestReceivedCheckpoint = replicator.getPrimaryCheckpoint(replicaShard.shardId()); + if (Objects.nonNull(latestReceivedCheckpoint) && latestReceivedCheckpoint.isAheadOf(receivedCheckpoint)) { + processLatestReceivedCheckpoint(replicaShard, thread); + } } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java index d1683f57315f4..72abd4f33e465 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicator.java @@ -15,6 +15,7 @@ import org.opensearch.OpenSearchCorruptionException; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; @@ -31,12 +32,13 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.TimeUnit; import reactor.util.annotation.NonNull; @@ -54,7 +56,7 @@ public class SegmentReplicator { private final ReplicationCollection onGoingReplications; private final ReplicationCollection onGoingMergedSegmentReplications; private final Map completedReplications = ConcurrentCollections.newConcurrentMap(); - private final ConcurrentMap> replicationCheckpointStats = + protected final ConcurrentMap> replicationCheckpointStats = ConcurrentCollections.newConcurrentMap(); private final ConcurrentMap primaryCheckpoint = ConcurrentCollections.newConcurrentMap(); @@ -167,9 +169,8 @@ public ReplicationStats getSegmentReplicationStats(final ShardId shardId) { long bytesBehind = highestEntry.getValue().getBytesBehind(); long replicationLag = bytesBehind > 0L - ? TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lowestEntry.getValue().getTimestamp()) + ? Duration.ofNanos(DateUtils.toLong(Instant.now()) - lowestEntry.getValue().getTimestamp()).toMillis() : 0; - return new ReplicationStats(bytesBehind, bytesBehind, replicationLag); } @@ -217,7 +218,7 @@ protected void pruneCheckpointsUpToLastSync(final IndexShard indexShard) { ); if (existingCheckpointStats != null && !existingCheckpointStats.isEmpty()) { - existingCheckpointStats.keySet().removeIf(key -> key < segmentInfoVersion); + existingCheckpointStats.keySet().removeIf(key -> key <= segmentInfoVersion); Map.Entry lastEntry = existingCheckpointStats.lastEntry(); if (lastEntry != null) { lastEntry.getValue().setBytesBehind(calculateBytesBehind(latestCheckpoint, indexReplicationCheckPoint)); diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/MergeSegmentCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/MergeSegmentCheckpoint.java index 424a2cc98939e..15b90c4fb8dfe 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/MergeSegmentCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/MergeSegmentCheckpoint.java @@ -6,30 +6,6 @@ * compatible open source license. 
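The SegmentReplicator change above stops subtracting System.nanoTime() values and instead stores checkpoint creation times as epoch nanoseconds (what DateUtils.toLong(Instant.now()) produces), so the gap between "now" and a checkpoint that may have been created on a different node is a meaningful duration; nanoTime() deltas are only comparable within a single JVM. A small sketch of the arithmetic, with epochNanos() as an illustrative stand-in for the DateUtils helper:

import java.time.Duration;
import java.time.Instant;

final class ReplicationLagExample {

    /** Illustrative equivalent of converting an Instant to epoch nanoseconds. */
    static long epochNanos(Instant instant) {
        return instant.getEpochSecond() * 1_000_000_000L + instant.getNano();
    }

    public static void main(String[] args) {
        // Timestamp recorded when a checkpoint was created, possibly on another node.
        long checkpointCreatedNanos = epochNanos(Instant.now().minusMillis(250));
        // Later, on the replica, the lag is a plain difference of wall-clock nanos.
        long lagMillis = Duration.ofNanos(epochNanos(Instant.now()) - checkpointCreatedNanos).toMillis();
        System.out.println("replication lag ~" + lagMillis + " ms"); // roughly 250
    }
}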
*/ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.indices.replication.checkpoint; import org.opensearch.common.annotation.ExperimentalApi; diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 8380187a288ba..39c2039191def 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -11,6 +11,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.time.DateUtils; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -19,6 +20,7 @@ import org.opensearch.index.store.StoreFileMetadata; import java.io.IOException; +import java.time.Instant; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -56,11 +58,11 @@ private ReplicationCheckpoint(ShardId shardId, String codec) { length = 0L; this.codec = codec; this.metadataMap = Collections.emptyMap(); - this.createdTimeStamp = System.nanoTime(); + this.createdTimeStamp = DateUtils.toLong(Instant.now()); } public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, String codec) { - this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap(), System.nanoTime()); + this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap(), DateUtils.toLong(Instant.now())); } public ReplicationCheckpoint( @@ -79,7 +81,7 @@ public ReplicationCheckpoint( this.length = length; this.codec = codec; this.metadataMap = metadataMap; - this.createdTimeStamp = System.nanoTime(); + this.createdTimeStamp = DateUtils.toLong(Instant.now()); } public ReplicationCheckpoint( diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index b340a0b81941d..50793d6e237db 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -107,7 +107,9 @@ import static org.opensearch.cluster.service.ClusterManagerTask.DELETE_PIPELINE; import static org.opensearch.cluster.service.ClusterManagerTask.PUT_PIPELINE; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_MAPPINGS; +import 
static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_SETTINGS; import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_MAPPINGS; +import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_SETTINGS; /** * Holder class for several ingest related services. @@ -272,7 +274,8 @@ public boolean resolveSystemIngestPipeline( // precedence), or if a V2 template does not match, any V1 templates String v2Template = MetadataIndexTemplateService.findV2Template(metadata, indexRequest.index(), false); if (v2Template != null) { - systemIngestPipelineId = getSystemIngestPipelineForTemplateV2(v2Template, indexRequest); + Settings settings = MetadataIndexTemplateService.resolveSettings(metadata, v2Template); + systemIngestPipelineId = getSystemIngestPipelineForTemplateV2(v2Template, indexRequest, settings); } else { List templates = MetadataIndexTemplateService.findV1Templates( metadata, @@ -344,7 +347,7 @@ public boolean resolvePipelines(final DocWriteRequest originalRequest, final } if (this.isSystemIngestPipelineEnabled) { - systemIngestPipelineId = getSystemIngestPipelineForTemplateV2(v2Template, indexRequest); + systemIngestPipelineId = getSystemIngestPipelineForTemplateV2(v2Template, indexRequest, settings); } } else { List templates = MetadataIndexTemplateService.findV1Templates( @@ -433,9 +436,13 @@ private String getSystemIngestPipelineForTemplateV1( final String indexId = createIndexIdWithTemplateSuffix(indexRequest.index()); Pipeline ingestPipeline = systemIngestPipelineCache.getSystemIngestPipeline(indexId); if (ingestPipeline == null) { + final List settingsList = new ArrayList<>(); final List> mappingsMap = new ArrayList<>(); final Map pipelineConfig = new HashMap<>(); for (final IndexTemplateMetadata template : templates) { + if (template.settings() != null) { + settingsList.add(template.settings()); + } if (template.mappings() != null) { try { mappingsMap.add(MapperService.parseMapping(xContentRegistry, template.mappings().string())); @@ -453,6 +460,7 @@ private String getSystemIngestPipelineForTemplateV1( } pipelineConfig.put(INDEX_TEMPLATE_MAPPINGS, mappingsMap); + pipelineConfig.put(INDEX_TEMPLATE_SETTINGS, settingsList); ingestPipeline = createSystemIngestPipeline(indexId, pipelineConfig); } @@ -461,7 +469,11 @@ private String getSystemIngestPipelineForTemplateV1( return ingestPipeline.getProcessors().isEmpty() ? null : indexId; } - private String getSystemIngestPipelineForTemplateV2(@NonNull final String templateName, @NonNull final IndexRequest indexRequest) { + private String getSystemIngestPipelineForTemplateV2( + @NonNull final String templateName, + @NonNull final IndexRequest indexRequest, + final Settings settings + ) { // Here we cache it with index name + template as the suffix since currently we don't have the uuid. // We need to cache it so that later during execution we can find it by indexId to reuse it. final String indexId = createIndexIdWithTemplateSuffix(indexRequest.index()); @@ -491,6 +503,7 @@ private String getSystemIngestPipelineForTemplateV2(@NonNull final String templa } pipelineConfig.put(INDEX_TEMPLATE_MAPPINGS, mappingsMap); + pipelineConfig.put(INDEX_TEMPLATE_SETTINGS, settings == null ? 
Collections.emptyList() : List.of(settings)); ingestPipeline = createSystemIngestPipeline(indexId, pipelineConfig); } @@ -515,10 +528,14 @@ private String getSystemIngestPipelineForExistingIndex(@NonNull final IndexMetad if (ingestPipeline == null) { // no cache we will try to resolve the ingest pipeline based on the index configuration final MappingMetadata mappingMetadata = indexMetadata.mapping(); + final Settings settings = indexMetadata.getSettings(); final Map pipelineConfig = new HashMap<>(); if (mappingMetadata != null) { pipelineConfig.put(INDEX_MAPPINGS, mappingMetadata.getSourceAsMap()); } + if (settings != null) { + pipelineConfig.put(INDEX_SETTINGS, settings); + } ingestPipeline = createSystemIngestPipeline(indexId, pipelineConfig); } // we can get an empty pipeline from the cache diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index a5e92293c0be1..5b3559b2fde49 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -68,6 +68,8 @@ import org.opensearch.cluster.InternalClusterInfoService; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.action.index.MappingUpdatedAction; +import org.opensearch.cluster.action.shard.LocalShardStateAction; +import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.applicationtemplates.SystemTemplatesPlugin; import org.opensearch.cluster.applicationtemplates.SystemTemplatesService; import org.opensearch.cluster.coordination.PersistedStateRegistry; @@ -86,6 +88,7 @@ import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.DiskThresholdMonitor; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.cluster.service.LocalClusterService; import org.opensearch.common.SetOnce; import org.opensearch.common.StopWatch; import org.opensearch.common.cache.module.CacheModule; @@ -130,6 +133,7 @@ import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryModule; +import org.opensearch.discovery.LocalDiscovery; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; @@ -702,12 +706,13 @@ protected Node(final Environment initialEnvironment, Collection clas final ClusterManagerMetrics clusterManagerMetrics = new ClusterManagerMetrics(metricsRegistry); List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class); - final ClusterService clusterService = new ClusterService( - settings, - settingsModule.getClusterSettings(), - threadPool, - clusterManagerMetrics - ); + final boolean clusterless = clusterPlugins.stream().anyMatch(ClusterPlugin::isClusterless); + final ClusterService clusterService; + if (clusterless) { + clusterService = new LocalClusterService(settings, settingsModule.getClusterSettings(), threadPool, clusterManagerMetrics); + } else { + clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, clusterManagerMetrics); + } clusterService.addStateApplier(scriptService); resourcesToClose.add(clusterService); final Set> consistentSettings = settingsModule.getConsistentSettings(); @@ -766,7 +771,8 @@ protected Node(final Environment initialEnvironment, Collection clas clusterInfoService, snapshotsInfoService, threadPool.getThreadContext(), - clusterManagerMetrics + 
clusterManagerMetrics, + clusterless ? LocalShardStateAction.class : ShardStateAction.class ); modules.add(clusterModule); final List mapperPlugins = pluginsService.filterPlugins(MapperPlugin.class); @@ -1347,27 +1353,31 @@ protected Node(final Environment initialEnvironment, Collection clas ); clusterInfoService.addListener(diskThresholdMonitor::onNewInfo); - final DiscoveryModule discoveryModule = new DiscoveryModule( - settings, - threadPool, - transportService, - namedWriteableRegistry, - networkService, - clusterService.getClusterManagerService(), - clusterService.getClusterApplierService(), - clusterService.getClusterSettings(), - pluginsService.filterPlugins(DiscoveryPlugin.class), - clusterModule.getAllocationService(), - environment.configDir(), - gatewayMetaState, - rerouteService, - fsHealthService, - persistedStateRegistry, - remoteStoreNodeService, - clusterManagerMetrics, - remoteClusterStateService - ); - + final Discovery discovery; + if (clusterless) { + discovery = new LocalDiscovery(transportService, clusterService.getClusterApplierService()); + } else { + discovery = new DiscoveryModule( + settings, + threadPool, + transportService, + namedWriteableRegistry, + networkService, + clusterService.getClusterManagerService(), + clusterService.getClusterApplierService(), + clusterService.getClusterSettings(), + pluginsService.filterPlugins(DiscoveryPlugin.class), + clusterModule.getAllocationService(), + environment.configDir(), + gatewayMetaState, + rerouteService, + fsHealthService, + persistedStateRegistry, + remoteStoreNodeService, + clusterManagerMetrics, + remoteClusterStateService + ).getDiscovery(); + } final SearchPipelineService searchPipelineService = new SearchPipelineService( clusterService, threadPool, @@ -1388,11 +1398,12 @@ protected Node(final Environment initialEnvironment, Collection clas transportService.getTaskManager(), taskCancellationMonitoringSettings ); + this.nodeService = new NodeService( settings, threadPool, monitorService, - discoveryModule.getDiscovery(), + discovery, transportService, indicesService, pluginsService, @@ -1535,7 +1546,7 @@ protected Node(final Environment initialEnvironment, Collection clas b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(SnapshotsInfoService.class).toInstance(snapshotsInfoService); b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); - b.bind(Discovery.class).toInstance(discoveryModule.getDiscovery()); + b.bind(Discovery.class).toInstance(discovery); b.bind(RemoteStoreSettings.class).toInstance(remoteStoreSettings); { b.bind(PeerRecoverySourceService.class) @@ -1716,7 +1727,7 @@ public Node start() throws NodeValidationException { injector.getInstance(GatewayService.class).start(); Discovery discovery = injector.getInstance(Discovery.class); discovery.setNodeConnectionsService(nodeConnectionsService); - clusterService.getClusterManagerService().setClusterStatePublisher(discovery::publish); + clusterService.getClusterManagerService().setClusterStatePublisher(discovery); // Start the transport service now so the publish address will be added to the local disco node in ClusterService TransportService transportService = injector.getInstance(TransportService.class); diff --git a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java index 1edd9f52d97a7..cfd26814697e0 100644 --- a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java @@ 
-32,6 +32,7 @@ package org.opensearch.plugins; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -85,6 +86,14 @@ default Map getExistingShardsAllocators() { return Collections.emptyMap(); } + /** + * Returns List of custom index name resolvers which can support additional custom wildcards. + * @return List of {@link IndexNameExpressionResolver.ExpressionResolver} + */ + default Collection getIndexNameCustomResolvers() { + return Collections.emptyList(); + } + /** * Called when the node is started * @@ -102,4 +111,13 @@ default void onNodeStarted(DiscoveryNode localNode) { onNodeStarted(); } + /** + * @return true if this plugin will handle cluster state management on behalf of the node, so the node does not + * need to discover a cluster manager and be part of a cluster. + * + * Note that if any ClusterPlugin returns true from this method, the node will start in clusterless mode. + */ + default boolean isClusterless() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java index b169d354ff977..18709994fb1fb 100644 --- a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java @@ -109,5 +109,17 @@ class SystemIngestPipelineConfigKeys { * ] */ public static final String INDEX_TEMPLATE_MAPPINGS = "index_template_mappings"; + + /** + * Use this key to access the settings{@link org.opensearch.common.settings.Settings} of the index from the config. + */ + public static final String INDEX_SETTINGS = "index_settings"; + + /** + * Use this key to access the settings{@link org.opensearch.common.settings.Settings} of the matched templates + * of the index from the config. If there are multiple matched templates the later one can override the setting of the previous one if merge + * rules are allowed. So this will be a list of settings. 
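Tying the Node.java wiring earlier in this diff to the new ClusterPlugin#isClusterless() hook above: if any installed ClusterPlugin returns true, the node starts with LocalClusterService and LocalDiscovery instead of electing or discovering a cluster manager. A hypothetical opt-in plugin could look like the sketch below; ExternallyManagedClusterPlugin is an invented name used purely for illustration.

import org.opensearch.plugins.ClusterPlugin;
import org.opensearch.plugins.Plugin;

/**
 * Hypothetical plugin that manages cluster state on behalf of the node.
 * Returning true here puts the node into clusterless mode.
 */
public class ExternallyManagedClusterPlugin extends Plugin implements ClusterPlugin {

    @Override
    public boolean isClusterless() {
        return true;
    }
}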
+ */ + public static final String INDEX_TEMPLATE_SETTINGS = "index_template_settings"; } } diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index 710dd32371f83..80a4619f56b64 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -68,6 +68,7 @@ import org.opensearch.search.deciders.ConcurrentSearchRequestDecider; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.subphase.highlight.Highlighter; +import org.opensearch.search.query.QueryCollectorContextSpecFactory; import org.opensearch.search.query.QueryPhaseSearcher; import org.opensearch.search.rescore.Rescorer; import org.opensearch.search.rescore.RescorerBuilder; @@ -227,6 +228,10 @@ default Optional getIndexSearcherExecutorProvider() { return Optional.empty(); } + default List getCollectorContextSpecFactories() { + return emptyList(); + } + /** * Executor service provider */ diff --git a/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java index f90d642409b01..826d5ca641b22 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureAuxTransportSettingsProvider.java @@ -10,7 +10,6 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; -import org.opensearch.transport.AuxTransport; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLException; @@ -26,17 +25,22 @@ public interface SecureAuxTransportSettingsProvider { /** * Fetch an SSLContext as managed by pluggable security provider. + * @param settings for providing additional configuration options when building the ssl context. + * @param auxTransportType key for enabling this transport with AUX_TRANSPORT_TYPES_SETTING. * @return an instance of SSLContext. */ - default Optional buildSecureAuxServerTransportContext(Settings settings, AuxTransport transport) throws SSLException { + default Optional buildSecureAuxServerTransportContext(Settings settings, String auxTransportType) throws SSLException { return Optional.empty(); } /** * Additional params required for configuring ALPN. + * @param settings for providing additional configuration options when building secure params. + * @param auxTransportType key for enabling this transport with AUX_TRANSPORT_TYPES_SETTING. 
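With the config keys defined above, a system ingest processor factory receives the concrete index's Settings under INDEX_SETTINGS and, when the index does not exist yet, a list of Settings from the matched templates under INDEX_TEMPLATE_SETTINGS. The sketch below shows one way a factory might resolve a single boolean setting from that map; the helper class, method name, and "index first, then latest matching template" precedence are assumptions for illustration, not part of the plugin contract.

import org.opensearch.common.settings.Settings;

import java.util.List;
import java.util.Map;

import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_SETTINGS;
import static org.opensearch.plugins.IngestPlugin.SystemIngestPipelineConfigKeys.INDEX_TEMPLATE_SETTINGS;

final class SystemIngestSettingsReader {

    /**
     * Resolve a boolean index setting from the pipeline config: prefer the
     * concrete index's settings, otherwise fall back to the last matching
     * template that defines the key.
     */
    static boolean resolveBooleanSetting(Map<String, Object> pipelineConfig, String key, boolean defaultValue) {
        if (pipelineConfig.get(INDEX_SETTINGS) instanceof Settings indexSettings && indexSettings.hasValue(key)) {
            return indexSettings.getAsBoolean(key, defaultValue);
        }
        if (pipelineConfig.get(INDEX_TEMPLATE_SETTINGS) instanceof List<?> templateSettings) {
            // Later templates can override earlier ones, so scan from the end.
            for (int i = templateSettings.size() - 1; i >= 0; i--) {
                if (templateSettings.get(i) instanceof Settings settings && settings.hasValue(key)) {
                    return settings.getAsBoolean(key, defaultValue);
                }
            }
        }
        return defaultValue;
    }
}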
* @return an instance of {@link SecureAuxTransportSettingsProvider.SecureAuxTransportParameters} */ - default Optional parameters() { + default Optional parameters(Settings settings, String auxTransportType) + throws SSLException { return Optional.empty(); } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 994c981b8bb0c..1060c946ae799 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -174,7 +174,7 @@ public RepositoriesService( public void registerOrUpdateRepository(final PutRepositoryRequest request, final ActionListener listener) { assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]"; - final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( + RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( request.name(), request.type(), request.settings(), @@ -206,14 +206,32 @@ public void registerOrUpdateRepository(final PutRepositoryRequest request, final registrationListener = listener; } - // Trying to create the new repository on cluster-manager to make sure it works - try { - closeRepository(createRepository(newRepositoryMetadata, typesRegistry)); - } catch (Exception e) { - registrationListener.onFailure(e); - return; + Repository currentRepository = repositories.get(request.name()); + boolean isReloadableSettings = currentRepository != null && currentRepository.isReloadableSettings(newRepositoryMetadata); + + if (isReloadableSettings) { + // We are reloading the repository, so we need to preserve the old settings in the new repository metadata + Settings updatedSettings = Settings.builder() + .put(currentRepository.getMetadata().settings()) + .put(newRepositoryMetadata.settings()) + .build(); + newRepositoryMetadata = new RepositoryMetadata( + newRepositoryMetadata.name(), + newRepositoryMetadata.type(), + updatedSettings, + newRepositoryMetadata.cryptoMetadata() + ); + } else { + // Trying to create the new repository on cluster-manager to make sure it works + try { + closeRepository(createRepository(newRepositoryMetadata, typesRegistry)); + } catch (Exception e) { + registrationListener.onFailure(e); + return; + } } + final RepositoryMetadata finalRepositoryMetadata = newRepositoryMetadata; clusterService.submitStateUpdateTask( "put_repository [" + request.name() + "]", new AckedClusterStateUpdateTask(request, registrationListener) { @@ -224,7 +242,9 @@ protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { - ensureRepositoryNotInUse(currentState, request.name()); + if (isReloadableSettings == false) { + ensureRepositoryNotInUse(currentState, request.name()); + } Metadata metadata = currentState.metadata(); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); RepositoriesMetadata repositories = metadata.custom(RepositoriesMetadata.TYPE); @@ -245,17 +265,17 @@ public ClusterState execute(ClusterState currentState) { List repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1); for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { - RepositoryMetadata updatedRepositoryMetadata = newRepositoryMetadata; + RepositoryMetadata updatedRepositoryMetadata = finalRepositoryMetadata; if 
(isSystemRepositorySettingPresent(repositoryMetadata.settings())) { Settings updatedSettings = Settings.builder() - .put(newRepositoryMetadata.settings()) + .put(finalRepositoryMetadata.settings()) .put(SYSTEM_REPOSITORY_SETTING.getKey(), true) .build(); updatedRepositoryMetadata = new RepositoryMetadata( - newRepositoryMetadata.name(), - newRepositoryMetadata.type(), + finalRepositoryMetadata.name(), + finalRepositoryMetadata.type(), updatedSettings, - newRepositoryMetadata.cryptoMetadata() + finalRepositoryMetadata.cryptoMetadata() ); } if (repositoryMetadata.name().equals(updatedRepositoryMetadata.name())) { @@ -481,7 +501,8 @@ public void applyClusterState(ClusterChangedEvent event) { if (previousMetadata.type().equals(repositoryMetadata.type()) == false || previousMetadata.settings().equals(repositoryMetadata.settings()) == false) { // Previous version is different from the version in settings - if (repository.isSystemRepository() && repository.isReloadable()) { + if ((repository.isSystemRepository() && repository.isReloadable()) + || repository.isReloadableSettings(repositoryMetadata)) { logger.debug( "updating repository [{}] in-place to use new metadata [{}]", repositoryMetadata.name(), diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 259c4a6e09ce7..521187f48b375 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -602,6 +602,10 @@ default boolean isReloadable() { return false; } + default boolean isReloadableSettings(RepositoryMetadata newRepositoryMetadata) { + return false; + } + /** * Reload the repository inplace */ diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 940942b816536..7cdbc31563654 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -264,6 +264,46 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final long CACHE_DEFAULT_THRESHOLD = calculateDefaultSnapshotRepositoryDataCacheThreshold(); + public static final String MAX_SNAPSHOT_BYTES_PER_SEC = "max_snapshot_bytes_per_sec"; + + public static final Setting SNAPSHOT_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( + MAX_SNAPSHOT_BYTES_PER_SEC, + new ByteSizeValue(40, ByteSizeUnit.MB), + Setting.Property.NodeScope + ); + + public static final String MAX_RESTORE_BYTES_PER_SEC = "max_restore_bytes_per_sec"; + + public static final Setting RESTORE_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( + MAX_RESTORE_BYTES_PER_SEC, + ByteSizeValue.ZERO, + Setting.Property.NodeScope + ); + + public static final String MAX_REMOTE_UPLOAD_BYTES_PER_SEC = "max_remote_upload_bytes_per_sec"; + + public static final Setting MAX_REMOTE_UPLOAD_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( + MAX_REMOTE_UPLOAD_BYTES_PER_SEC, + ByteSizeValue.ZERO, + Setting.Property.NodeScope + ); + + public static final String MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC = "max_remote_low_priority_upload_bytes_per_sec"; + + public static final Setting MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( + MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC, + ByteSizeValue.ZERO, + Setting.Property.NodeScope + ); + + 
public static final String MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC = "max_remote_download_bytes_per_sec"; + + public static final Setting MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( + MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC, + ByteSizeValue.ZERO, + Setting.Property.NodeScope + ); + /** * Set to Integer.MAX_VALUE - 8 to prevent OutOfMemoryError due to array header requirements, following the limit used in certain JDK versions. * This ensures compatibility across various JDK versions. For a practical usage example, @@ -328,6 +368,14 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.NodeScope ); + private static final Set RELOADABLE_SETTINGS = Set.of( + MAX_RESTORE_BYTES_PER_SEC, + MAX_SNAPSHOT_BYTES_PER_SEC, + MAX_REMOTE_UPLOAD_BYTES_PER_SEC, + MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC, + MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC + ); + public static long calculateDefaultSnapshotRepositoryDataCacheThreshold() { return Math.max(ByteSizeUnit.KB.toBytes(500), CACHE_MAX_THRESHOLD / 2); } @@ -592,15 +640,11 @@ private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) { this.metadata = repositoryMetadata; supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); - snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); - restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", ByteSizeValue.ZERO); - remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); - remoteUploadLowPriorityRateLimiter = getRateLimiter( - metadata.settings(), - "max_remote_low_priority_upload_bytes_per_sec", - ByteSizeValue.ZERO - ); - remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); + snapshotRateLimiter = getRateLimiter(SNAPSHOT_BYTES_PER_SEC_SETTING, metadata.settings()); + restoreRateLimiter = getRateLimiter(RESTORE_BYTES_PER_SEC_SETTING, metadata.settings()); + remoteUploadRateLimiter = getRateLimiter(MAX_REMOTE_UPLOAD_BYTES_PER_SEC_SETTING, metadata.settings()); + remoteUploadLowPriorityRateLimiter = getRateLimiter(MAX_REMOTE_LOW_PRIORITY_UPLOAD_BYTES_PER_SEC_SETTING, metadata.settings()); + remoteDownloadRateLimiter = getRateLimiter(MAX_REMOTE_DOWNLOAD_BYTES_PER_SEC_SETTING, metadata.settings()); readOnly = READONLY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); @@ -2891,17 +2935,16 @@ private BlobPath shardPath(IndexId indexId, int shardId) { /** * Configures RateLimiter based on repository and global settings * - * @param repositorySettings repository settings - * @param setting setting to use to configure rate limiter - * @param defaultRate default limiting rate + * @param bytesPerSecSetting setting to use to configure rate limiter + * @param settings repository settings * @return rate limiter or null of no throttling is needed */ - private RateLimiter getRateLimiter(Settings repositorySettings, String setting, ByteSizeValue defaultRate) { - ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting, defaultRate); - if (maxSnapshotBytesPerSec.getBytes() <= 0) { + private RateLimiter getRateLimiter(Setting bytesPerSecSetting, Settings settings) { + ByteSizeValue maxByteSize = bytesPerSecSetting.get(settings); + if (maxByteSize.getBytes() <= 0) { 
return null; } else { - return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.getMbFrac()); + return new RateLimiter.SimpleRateLimiter(maxByteSize.getMbFrac()); } } @@ -4326,6 +4369,31 @@ public InputStream maybeRateLimitSnapshots(InputStream stream) { return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT); } + // Visible for testing + public RateLimiter snapshotRateLimiter() { + return snapshotRateLimiter; + } + + // Visible for testing + public RateLimiter restoreRateLimiter() { + return restoreRateLimiter; + } + + // Visible for testing + public RateLimiter remoteUploadRateLimiter() { + return remoteUploadRateLimiter; + } + + // Visible for testing + public RateLimiter remoteUploadLowPriorityRateLimiter() { + return remoteUploadLowPriorityRateLimiter; + } + + // Visible for testing + public RateLimiter remoteDownloadRateLimiter() { + return remoteDownloadRateLimiter; + } + @Override public List> getRestrictedSystemRepositorySettings() { return Arrays.asList(SYSTEM_REPOSITORY_SETTING, READONLY_SETTING, REMOTE_STORE_INDEX_SHALLOW_COPY); @@ -4715,6 +4783,31 @@ private static Optional extractShallowSnapshotUUID(String blobName) { return Optional.empty(); } + @Override + public boolean isReloadableSettings(RepositoryMetadata newRepositoryMetadata) { + if (metadata.name().equals(newRepositoryMetadata.name()) == false + || metadata.type().equals(newRepositoryMetadata.type()) == false + || Objects.equals(metadata.cryptoMetadata(), newRepositoryMetadata.cryptoMetadata()) == false) { + return false; + } + Settings newSettings = newRepositoryMetadata.settings(); + if (RELOADABLE_SETTINGS.containsAll(newSettings.keySet())) { + // the new settings are all contained in RELOADABLE_SETTINGS + return true; + } else { + Settings currentSettings = metadata.settings(); + // In addition to the settings in RELOADABLE_SETTINGS, all the new settings should be equal to current settings + Set allKeys = Stream.concat(newSettings.keySet().stream(), currentSettings.keySet().stream()) + .filter(key -> !RELOADABLE_SETTINGS.contains(key)) + .collect(Collectors.toSet()); + return allKeys.stream().allMatch(key -> isSettingEqual(newSettings, currentSettings, key)); + } + } + + private boolean isSettingEqual(Settings s1, Settings s2, String key) { + return Objects.equals(s1.get(key), s2.get(key)); + } + /** * The result of removing a snapshot from a shard folder in the repository. 
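The isReloadableSettings implementation above lets a repository update be applied in place when the only keys that differ are the dynamic throttling settings; any other changed key still forces the usual create-and-verify path. A standalone sketch of that check, using plain maps in place of the real Settings objects:

import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Stream;

final class ReloadableSettingsCheck {

    /** The throttling keys that may change without recreating the repository. */
    private static final Set<String> RELOADABLE = Set.of(
        "max_snapshot_bytes_per_sec",
        "max_restore_bytes_per_sec",
        "max_remote_upload_bytes_per_sec",
        "max_remote_low_priority_upload_bytes_per_sec",
        "max_remote_download_bytes_per_sec"
    );

    /** True only if every key that differs between the two maps is a reloadable one. */
    static boolean isReloadableUpdate(Map<String, String> current, Map<String, String> updated) {
        return Stream.concat(current.keySet().stream(), updated.keySet().stream())
            .filter(key -> RELOADABLE.contains(key) == false)
            .allMatch(key -> Objects.equals(current.get(key), updated.get(key)));
    }

    public static void main(String[] args) {
        Map<String, String> current = Map.of("bucket", "snapshots", "max_snapshot_bytes_per_sec", "40mb");
        // Only the throttle changed: safe to reload in place.
        System.out.println(isReloadableUpdate(current, Map.of("bucket", "snapshots", "max_snapshot_bytes_per_sec", "80mb")));
        // The bucket changed: the repository must be re-registered and verified.
        System.out.println(isReloadableUpdate(current, Map.of("bucket", "other", "max_snapshot_bytes_per_sec", "80mb")));
    }
}

In the reloadable case, registerOrUpdateRepository also merges the previous settings into the new metadata, so keys omitted from the update request are preserved rather than dropped.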
*/ diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 24bdb2d28cba2..cee66b734e76b 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -257,6 +257,7 @@ import org.opensearch.search.fetch.subphase.highlight.Highlighter; import org.opensearch.search.fetch.subphase.highlight.PlainHighlighter; import org.opensearch.search.fetch.subphase.highlight.UnifiedHighlighter; +import org.opensearch.search.query.QueryCollectorContextSpecRegistry; import org.opensearch.search.query.QueryPhase; import org.opensearch.search.query.QueryPhaseSearcher; import org.opensearch.search.query.QueryPhaseSearcherWrapper; @@ -350,6 +351,7 @@ public SearchModule(Settings settings, List plugins) { indexSearcherExecutorProvider = registerIndexSearcherExecutorProvider(plugins); namedWriteables.addAll(SortValue.namedWriteables()); concurrentSearchDeciderFactories = registerConcurrentSearchDeciderFactories(plugins); + registerQueryCollectorContextSpec(plugins); } private Collection registerConcurrentSearchDeciderFactories(List plugins) { @@ -1297,6 +1299,10 @@ private SearchPlugin.ExecutorServiceProvider registerIndexSearcherExecutorProvid return provider; } + private void registerQueryCollectorContextSpec(List plugins) { + registerFromPlugin(plugins, SearchPlugin::getCollectorContextSpecFactories, QueryCollectorContextSpecRegistry::registerFactory); + } + public FetchPhase getFetchPhase() { return new FetchPhase(fetchSubPhases); } diff --git a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java index bd8f637136ea7..e6d11d02eacf6 100644 --- a/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java +++ b/server/src/main/java/org/opensearch/search/approximate/ApproximatePointRangeQuery.java @@ -8,10 +8,15 @@ package org.opensearch.search.approximate; +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.FloatPoint; +import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; +import org.apache.lucene.sandbox.document.BigIntegerPoint; +import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; @@ -40,6 +45,11 @@ */ public class ApproximatePointRangeQuery extends ApproximateQuery { public static final Function LONG_FORMAT = bytes -> Long.toString(LongPoint.decodeDimension(bytes, 0)); + public static final Function INT_FORMAT = bytes -> Integer.toString(IntPoint.decodeDimension(bytes, 0)); + public static final Function HALF_FLOAT_FORMAT = bytes -> Float.toString(HalfFloatPoint.decodeDimension(bytes, 0)); + public static final Function FLOAT_FORMAT = bytes -> Float.toString(FloatPoint.decodeDimension(bytes, 0)); + public static final Function DOUBLE_FORMAT = bytes -> Double.toString(DoublePoint.decodeDimension(bytes, 0)); + public static final Function UNSIGNED_LONG_FORMAT = bytes -> BigIntegerPoint.decodeDimension(bytes, 0).toString(); private int size; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackers.java 
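The per-type format functions added to ApproximatePointRangeQuery above simply decode the fixed-width, index-time byte encoding of each numeric point back into a readable string for debug and profiler output. A small example of the same decodeDimension round trip, shown here with Lucene's IntPoint and LongPoint only (the sandbox HalfFloatPoint and BigIntegerPoint follow the same pattern):

import java.util.function.Function;

import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;

final class PointFormatExample {

    // Decode the raw index-time byte encoding back into a readable value.
    static final Function<byte[], String> INT_FORMAT = bytes -> Integer.toString(IntPoint.decodeDimension(bytes, 0));
    static final Function<byte[], String> LONG_FORMAT = bytes -> Long.toString(LongPoint.decodeDimension(bytes, 0));

    public static void main(String[] args) {
        byte[] encodedInt = new byte[Integer.BYTES];
        IntPoint.encodeDimension(42, encodedInt, 0);
        System.out.println(INT_FORMAT.apply(encodedInt)); // 42

        byte[] encodedLong = new byte[Long.BYTES];
        LongPoint.encodeDimension(1_234_567_890_123L, encodedLong, 0);
        System.out.println(LONG_FORMAT.apply(encodedLong)); // 1234567890123
    }
}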
b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackers.java index c27c50ac12c0f..2cf5f63144e9a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackers.java +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackers.java @@ -9,9 +9,11 @@ package org.opensearch.search.backpressure.trackers; import org.opensearch.common.util.Streak; +import org.opensearch.common.util.TimeBasedExpiryTracker; import org.opensearch.wlm.ResourceType; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.BooleanSupplier; import java.util.function.IntSupplier; @@ -22,9 +24,19 @@ */ public class NodeDuressTrackers { private final Map duressTrackers; + private final Map resourceDuressCache = new ConcurrentHashMap<>(); + private final BooleanSupplier nodeDuressCacheExpiryChecker; public NodeDuressTrackers(Map duressTrackers) { + this(duressTrackers, new TimeBasedExpiryTracker(System::nanoTime)); + } + + public NodeDuressTrackers(Map duressTrackers, BooleanSupplier nodeDuressCacheExpiryChecker) { this.duressTrackers = duressTrackers; + for (ResourceType resourceType : ResourceType.values()) { + resourceDuressCache.put(resourceType, false); + } + this.nodeDuressCacheExpiryChecker = nodeDuressCacheExpiryChecker; } /** @@ -32,7 +44,8 @@ public NodeDuressTrackers(Map duressTrackers) { * @return Boolean */ public boolean isResourceInDuress(ResourceType resourceType) { - return duressTrackers.get(resourceType).test(); + updateCache(); + return resourceDuressCache.get(resourceType); } /** @@ -48,6 +61,13 @@ public boolean isNodeInDuress() { return false; } + private void updateCache() { + if (nodeDuressCacheExpiryChecker.getAsBoolean()) { + for (ResourceType resourceType : ResourceType.values()) + resourceDuressCache.put(resourceType, duressTrackers.get(resourceType).test()); + } + } + /** * NodeDuressTracker is used to check if the node is in duress * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java index b5f6c082a18c5..fc4b1d3e4b828 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java @@ -35,10 +35,10 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.index.query.ParsedQuery; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.rescore.RescoreContext; @@ -86,8 +86,8 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio searcher.createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1); for (RescoreContext rescoreContext : context.rescore()) { - for (Query query : rescoreContext.getQueries()) { - searcher.createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE, 1); + for (ParsedQuery parsedQuery : rescoreContext.getParsedQueries()) { + searcher.createWeight(context.searcher().rewrite(parsedQuery.query()), ScoreMode.COMPLETE, 1); } } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java 
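The NodeDuressTrackers change above caches the per-resource duress flags and refreshes them only when a time-based expiry supplier fires, rather than probing every tracker on each isResourceInDuress call. A JDK-only sketch of that caching idiom; ExpiringCachedCheck and the one-second refresh interval are illustrative assumptions, not the OpenSearch TimeBasedExpiryTracker.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BooleanSupplier;
import java.util.function.LongSupplier;

final class ExpiringCachedCheck {

    private final BooleanSupplier expensiveCheck;
    private final BooleanSupplier isStale;
    private volatile boolean cachedValue;

    ExpiringCachedCheck(BooleanSupplier expensiveCheck, BooleanSupplier isStale) {
        this.expensiveCheck = expensiveCheck;
        this.isStale = isStale;
        this.cachedValue = expensiveCheck.getAsBoolean();
    }

    /** Returns the cached result, re-running the expensive check only when it has expired. */
    boolean get() {
        if (isStale.getAsBoolean()) {
            cachedValue = expensiveCheck.getAsBoolean();
        }
        return cachedValue;
    }

    /** Fires at most once per interval, mirroring a time-based expiry tracker. */
    static BooleanSupplier expiry(LongSupplier nanoClock, long intervalNanos) {
        AtomicLong lastRefresh = new AtomicLong(nanoClock.getAsLong());
        return () -> {
            long now = nanoClock.getAsLong();
            long last = lastRefresh.get();
            return now - last > intervalNanos && lastRefresh.compareAndSet(last, now);
        };
    }

    public static void main(String[] args) {
        ExpiringCachedCheck cpuInDuress = new ExpiringCachedCheck(
            () -> Runtime.getRuntime().availableProcessors() == 0, // stand-in for a real duress probe
            expiry(System::nanoTime, 1_000_000_000L)                // refresh at most once per second
        );
        System.out.println(cpuInDuress.get());
    }
}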
b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java index 406d9c8b4bc03..427946e7e2b21 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java @@ -41,6 +41,7 @@ import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.FetchSubPhaseProcessor; +import org.opensearch.search.rescore.RescoreContext; import java.io.IOException; import java.util.ArrayList; @@ -65,6 +66,12 @@ public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOExcept if (context.parsedPostFilter() != null) { namedQueries.putAll(context.parsedPostFilter().namedFilters()); } + if (context.rescore() != null) { + for (RescoreContext rescoreContext : context.rescore()) { + rescoreContext.getParsedQueries().forEach(query -> namedQueries.putAll(query.namedFilters())); + } + } + if (namedQueries.isEmpty()) { return null; } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index bf570a0dd0d60..4a5159681d07f 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -213,7 +213,7 @@ public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws // createWeight() is called for each query in the tree, so we tell the queryProfiler // each invocation so that it can build an internal representation of the query // tree - ContextualProfileBreakdown profile = profiler.getQueryBreakdown(query); + ContextualProfileBreakdown profile = profiler.getQueryBreakdown(query); Timer timer = profile.getTimer(QueryTimingType.CREATE_WEIGHT); timer.start(); final Weight weight; diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index 904b04b249b1b..f067064cc56f5 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public abstract class AbstractInternalProfileTree, E> { +public abstract class AbstractInternalProfileTree { protected ArrayList breakdowns; /** Maps the Query to it's list of children. 
This is basically the dependency tree */ diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java index 4a1563e7cdce9..1bacc26eaa024 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java @@ -32,9 +32,12 @@ package org.opensearch.search.profile; +import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.Map; +import java.util.TreeMap; +import java.util.function.Supplier; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -45,46 +48,46 @@ * * @opensearch.internal */ -public abstract class AbstractProfileBreakdown> { +public abstract class AbstractProfileBreakdown { - /** - * The accumulated timings for this query node - */ - protected final Timer[] timings; - protected final T[] timingTypes; - public static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; - public static final String TIMING_TYPE_START_TIME_SUFFIX = "_start_time"; + protected final Map metrics; /** Sole constructor. */ - public AbstractProfileBreakdown(Class clazz) { - this.timingTypes = clazz.getEnumConstants(); - timings = new Timer[timingTypes.length]; - for (int i = 0; i < timings.length; ++i) { - timings[i] = new Timer(); - } + public AbstractProfileBreakdown(Collection> metricSuppliers) { + this.metrics = metricSuppliers.stream().map(Supplier::get).collect(Collectors.toMap(ProfileMetric::getName, metric -> metric)); } - public Timer getTimer(T timing) { - return timings[timing.ordinal()]; + public Timer getTimer(Enum type) { + ProfileMetric metric = metrics.get(type.toString()); + assert metric instanceof Timer : "Metric " + type + " is not a timer"; + return (Timer) metric; } - public void setTimer(T timing, Timer timer) { - timings[timing.ordinal()] = timer; + public ProfileMetric getMetric(String name) { + return metrics.get(name); } /** - * Build a timing count breakdown for current instance + * Build a breakdown for current instance */ public Map toBreakdownMap() { - Map map = new HashMap<>(this.timings.length * 3); - for (T timingType : this.timingTypes) { - map.put(timingType.toString(), this.timings[timingType.ordinal()].getApproximateTiming()); - map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, this.timings[timingType.ordinal()].getCount()); - map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, this.timings[timingType.ordinal()].getEarliestTimerStartTime()); + Map map = new TreeMap<>(); + for (Map.Entry entry : metrics.entrySet()) { + map.putAll(entry.getValue().toBreakdownMap()); } return Collections.unmodifiableMap(map); } + public long toNodeTime() { + long total = 0; + for (Map.Entry entry : metrics.entrySet()) { + if (entry.getValue() instanceof Timer t) { + total += t.getApproximateTiming(); + } + } + return total; + } + /** * Fetch extra debugging information. 
*/ @@ -92,11 +95,4 @@ public Map toDebugMap() { return emptyMap(); } - public long toNodeTime() { - long total = 0; - for (T timingType : timingTypes) { - total += timings[timingType.ordinal()].getApproximateTiming(); - } - return total; - } } diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractProfiler.java b/server/src/main/java/org/opensearch/search/profile/AbstractProfiler.java index 4db1cb87a231d..0527bb44e16d4 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractProfiler.java @@ -39,7 +39,7 @@ * * @opensearch.internal */ -public class AbstractProfiler, E> { +public abstract class AbstractProfiler { protected final AbstractInternalProfileTree profileTree; diff --git a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java index 3fe621321c8ad..2ca88f5cabc85 100644 --- a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java @@ -11,8 +11,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; +import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * Provide contextual profile breakdowns which are associated with freestyle context. Used when concurrent @@ -20,9 +22,10 @@ * * @opensearch.internal */ -public abstract class ContextualProfileBreakdown> extends AbstractProfileBreakdown { - public ContextualProfileBreakdown(Class clazz) { - super(clazz); +public abstract class ContextualProfileBreakdown extends AbstractProfileBreakdown { + + public ContextualProfileBreakdown(Collection> metrics) { + super(metrics); } /** @@ -30,7 +33,7 @@ public ContextualProfileBreakdown(Class clazz) { * @param context freestyle context * @return contextual profile breakdown instance */ - public abstract AbstractProfileBreakdown context(Object context); + public abstract AbstractProfileBreakdown context(Object context); public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {} diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileMetric.java b/server/src/main/java/org/opensearch/search/profile/ProfileMetric.java new file mode 100644 index 0000000000000..36bb9b300f3b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/ProfileMetric.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Map; + +/** + * A metric for profiling. 
+ */ +@ExperimentalApi +public abstract class ProfileMetric { + + private final String name; + + public ProfileMetric(String name) { + this.name = name; + } + + /** + * + * @return name of the metric + */ + public String getName() { + return name; + } + + /** + * + * @return map representation of breakdown + */ + abstract public Map toBreakdownMap(); +} diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileMetricUtil.java b/server/src/main/java/org/opensearch/search/profile/ProfileMetricUtil.java new file mode 100644 index 0000000000000..248e29fc0383f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/ProfileMetricUtil.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile; + +import org.opensearch.search.profile.aggregation.AggregationTimingType; +import org.opensearch.search.profile.query.QueryTimingType; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.function.Supplier; + +/** + * Utility class to provide profile metrics to breakdowns. + */ +public class ProfileMetricUtil { + + public static Collection> getDefaultQueryProfileMetrics() { + Collection> metrics = new ArrayList<>(); + for (QueryTimingType type : QueryTimingType.values()) { + metrics.add(() -> new Timer(type.toString())); + } + return metrics; + } + + public static Collection> getAggregationProfileMetrics() { + Collection> metrics = new ArrayList<>(); + for (AggregationTimingType type : AggregationTimingType.values()) { + metrics.add(() -> new Timer(type.toString())); + } + return metrics; + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index 2c0d2cf3ba78a..7969a9e5890bf 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -271,7 +271,7 @@ static void removeStartTimeFields(Map modifiedBreakdown) { Iterator> iterator = modifiedBreakdown.entrySet().iterator(); while (iterator.hasNext()) { Map.Entry entry = iterator.next(); - if (entry.getKey().endsWith(AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX)) { + if (entry.getKey().endsWith(Timer.TIMING_TYPE_START_TIME_SUFFIX)) { iterator.remove(); } } diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 75337f89e67ca..5aaf1d670313e 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -79,7 +79,7 @@ public QueryProfiler addQueryProfiler() { /** Get the current profiler. */ public QueryProfiler getCurrentQueryProfiler() { - return queryProfilers.get(queryProfilers.size() - 1); + return queryProfilers.getLast(); } /** Return the list of all created {@link QueryProfiler}s so far. 
*/ diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java index 864c689cf7fa0..673d05bfdca91 100644 --- a/server/src/main/java/org/opensearch/search/profile/Timer.java +++ b/server/src/main/java/org/opensearch/search/profile/Timer.java @@ -32,6 +32,9 @@ package org.opensearch.search.profile; +import java.util.HashMap; +import java.util.Map; + /** Helps measure how much time is spent running some methods. * The {@link #start()} and {@link #stop()} methods should typically be called * in a try/finally clause with {@link #start()} being called right before the @@ -48,16 +51,19 @@ * * @opensearch.internal */ -public class Timer { +public class Timer extends ProfileMetric { + public static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + public static final String TIMING_TYPE_START_TIME_SUFFIX = "_start_time"; private boolean doTiming; private long timing, count, lastCount, start, earliestTimerStartTime; - public Timer() { - this(0, 0, 0, 0, 0); + public Timer(String name) { + super(name); } - public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) { + public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime, String name) { + super(name); this.timing = timing; this.count = count; this.lastCount = lastCount; @@ -131,4 +137,13 @@ public final long getApproximateTiming() { } return timing; } + + @Override + public Map toBreakdownMap() { + Map map = new HashMap<>(); + map.put(getName(), getApproximateTiming()); + map.put(getName() + TIMING_TYPE_COUNT_SUFFIX, getCount()); + map.put(getName() + TIMING_TYPE_START_TIME_SUFFIX, getEarliestTimerStartTime()); + return map; + } } diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java index 8642f0da4a90b..3c99b041a423c 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java @@ -34,9 +34,13 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.profile.AbstractProfileBreakdown; +import org.opensearch.search.profile.ProfileMetric; +import org.opensearch.search.profile.ProfileMetricUtil; +import java.util.Collection; import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; import static java.util.Collections.unmodifiableMap; @@ -46,11 +50,15 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class AggregationProfileBreakdown extends AbstractProfileBreakdown { +public class AggregationProfileBreakdown extends AbstractProfileBreakdown { private final Map extra = new HashMap<>(); public AggregationProfileBreakdown() { - super(AggregationTimingType.class); + this(ProfileMetricUtil.getAggregationProfileMetrics()); + } + + public AggregationProfileBreakdown(Collection> timers) { + super(timers); } /** @@ -64,4 +72,5 @@ public void addDebugInfo(String key, Object value) { public Map toDebugMap() { return unmodifiableMap(extra); } + } diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java b/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java index deed68c535cf9..593359cd5c22c 100644 --- 
a/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java @@ -13,8 +13,8 @@ package org.opensearch.search.profile.aggregation; -import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ProfileResult; +import org.opensearch.search.profile.Timer; import java.util.HashMap; import java.util.LinkedList; @@ -31,7 +31,7 @@ public class ConcurrentAggregationProfiler extends AggregationProfiler { private static final String MAX_PREFIX = "max_"; private static final String MIN_PREFIX = "min_"; private static final String AVG_PREFIX = "avg_"; - private static final String START_TIME_KEY = AggregationTimingType.INITIALIZE + AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX; + private static final String START_TIME_KEY = AggregationTimingType.INITIALIZE + Timer.TIMING_TYPE_START_TIME_SUFFIX; private static final String[] breakdownCountStatsTypes = { "build_leaf_collector_count", "collect_count" }; @Override @@ -82,8 +82,7 @@ private List reduceProfileResultsTree(List profile // Profiled breakdown total time for (AggregationTimingType timingType : AggregationTimingType.values()) { String breakdownTimingType = timingType.toString(); - Long startTime = profileResult.getTimeBreakdown() - .get(breakdownTimingType + AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX); + Long startTime = profileResult.getTimeBreakdown().get(breakdownTimingType + Timer.TIMING_TYPE_START_TIME_SUFFIX); Long endTime = startTime + profileResult.getTimeBreakdown().get(breakdownTimingType); minSliceStartTimeMap.put( breakdownTimingType, @@ -103,7 +102,7 @@ private List reduceProfileResultsTree(List profile // Profiled breakdown count for (AggregationTimingType timingType : AggregationTimingType.values()) { String breakdownType = timingType.toString(); - String breakdownTypeCount = breakdownType + AbstractProfileBreakdown.TIMING_TYPE_COUNT_SUFFIX; + String breakdownTypeCount = breakdownType + Timer.TIMING_TYPE_COUNT_SUFFIX; breakdown.put( breakdownTypeCount, breakdown.getOrDefault(breakdownTypeCount, 0L) + profileResult.getTimeBreakdown().get(breakdownTypeCount) diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java index 2f5d632ee2d87..2541bd3887f13 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java @@ -20,7 +20,7 @@ * * @opensearch.internal */ -public abstract class AbstractQueryProfileTree extends AbstractInternalProfileTree, Query> { +public abstract class AbstractQueryProfileTree extends AbstractInternalProfileTree { /** Rewrite time */ private long rewriteTime; @@ -64,4 +64,6 @@ public void stopAndAddRewriteTime() { public long getRewriteTime() { return rewriteTime; } + + protected abstract ContextualProfileBreakdown createProfileBreakdown(); } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index c017b15bc6d3c..8ee0f0b8ac7e1 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ 
b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -13,13 +13,23 @@ import org.opensearch.OpenSearchException; import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileMetric; +import org.opensearch.search.profile.Timer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +import static org.opensearch.search.profile.Timer.TIMING_TYPE_COUNT_SUFFIX; +import static org.opensearch.search.profile.Timer.TIMING_TYPE_START_TIME_SUFFIX; /** * A record of timings for the various operations that may happen during query execution. @@ -28,7 +38,7 @@ * * @opensearch.internal */ -public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown { +public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown { static final String SLICE_END_TIME_SUFFIX = "_slice_end_time"; static final String SLICE_START_TIME_SUFFIX = "_slice_start_time"; static final String MAX_PREFIX = "max_"; @@ -40,26 +50,32 @@ public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBrea private long avgSliceNodeTime = 0L; // keep track of all breakdown timings per segment. package-private for testing - private final Map> contexts = new ConcurrentHashMap<>(); + private final Map contexts = new ConcurrentHashMap<>(); // represents slice to leaves mapping as for each slice a unique collector instance is created private final Map> sliceCollectorsToLeaves = new ConcurrentHashMap<>(); - /** Sole constructor. 
*/ - public ConcurrentQueryProfileBreakdown() { - super(QueryTimingType.class); + private final Collection> metricSuppliers; + private final Set timingMetrics; + private final Set nonTimingMetrics; + + public ConcurrentQueryProfileBreakdown(Collection> metricSuppliers) { + super(metricSuppliers); + this.metricSuppliers = metricSuppliers; + this.timingMetrics = getTimingMetrics(); + this.nonTimingMetrics = getNonTimingMetrics(); } @Override - public AbstractProfileBreakdown context(Object context) { + public AbstractProfileBreakdown context(Object context) { // See please https://bugs.openjdk.java.net/browse/JDK-8161372 - final AbstractProfileBreakdown profile = contexts.get(context); + final AbstractProfileBreakdown profile = contexts.get(context); if (profile != null) { return profile; } - return contexts.computeIfAbsent(context, ctx -> new QueryProfileBreakdown()); + return contexts.computeIfAbsent(context, ctx -> new QueryProfileBreakdown(metricSuppliers)); } @Override @@ -87,12 +103,12 @@ public Map toBreakdownMap() { assert contexts.size() == 1 : "Unexpected size: " + contexts.size() + " of leaves breakdown in ConcurrentQueryProfileBreakdown of rewritten query for a leaf."; - AbstractProfileBreakdown breakdown = contexts.values().iterator().next(); + AbstractProfileBreakdown breakdown = contexts.values().iterator().next(); queryNodeTime = breakdown.toNodeTime() + createWeightTime; maxSliceNodeTime = 0L; minSliceNodeTime = 0L; avgSliceNodeTime = 0L; - Map queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap()); + Map queryBreakdownMap = new TreeMap<>(breakdown.toBreakdownMap()); queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime); queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L); return queryBreakdownMap; @@ -110,19 +126,19 @@ public Map toBreakdownMap() { * default breakdown map. 
*/ private Map buildDefaultQueryBreakdownMap(long createWeightTime) { - final Map concurrentQueryBreakdownMap = new HashMap<>(); + final Map concurrentQueryBreakdownMap = new TreeMap<>(); for (QueryTimingType timingType : QueryTimingType.values()) { final String timingTypeKey = timingType.toString(); - final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + final String timingTypeCountKey = timingType + TIMING_TYPE_COUNT_SUFFIX; if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { concurrentQueryBreakdownMap.put(timingTypeKey, createWeightTime); concurrentQueryBreakdownMap.put(timingTypeCountKey, 1L); continue; } - final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey; - final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey; - final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey; + final String maxBreakdownTypeTime = MAX_PREFIX + timingType; + final String minBreakdownTypeTime = MIN_PREFIX + timingType; + final String avgBreakdownTypeTime = AVG_PREFIX + timingType; final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey; final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey; final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey; @@ -156,8 +172,9 @@ Map> buildSliceLevelBreakdown() { // max slice end time across all timing types long sliceMaxEndTime = Long.MIN_VALUE; long sliceMinStartTime = Long.MAX_VALUE; - for (QueryTimingType timingType : QueryTimingType.values()) { - if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + + for (String timingType : timingMetrics) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT.toString())) { // do nothing for create weight as that is query level time and not slice level continue; } @@ -210,9 +227,7 @@ Map> buildSliceLevelBreakdown() { ); // compute the sliceEndTime for timingType using max of endTime across slice leaves - final long sliceLeafTimingTypeEndTime = sliceLeafTimingTypeStartTime + currentSliceLeafBreakdownMap.get( - timingType.toString() - ); + final long sliceLeafTimingTypeEndTime = sliceLeafTimingTypeStartTime + currentSliceLeafBreakdownMap.get(timingType); currentSliceBreakdown.compute( timingTypeSliceEndTimeKey, (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime) @@ -235,13 +250,28 @@ Map> buildSliceLevelBreakdown() { sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime); // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime currentSliceBreakdown.put( - timingType.toString(), + timingType, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault( timingTypeSliceStartTimeKey, 0L ) ); } + + for (String metric : nonTimingMetrics) { + for (LeafReaderContext sliceLeaf : slice.getValue()) { + if (!contexts.containsKey(sliceLeaf)) { + continue; + } + final Map currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap(); + final long sliceLeafMetricValue = currentSliceLeafBreakdownMap.get(metric); + currentSliceBreakdown.compute( + metric, + (key, value) -> (value == null) ? 
sliceLeafMetricValue : value + sliceLeafMetricValue + ); + } + } + // currentSliceNodeTime does not include the create weight time, as that is computed in non-concurrent part long currentSliceNodeTime; if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) { @@ -284,17 +314,27 @@ public Map buildQueryBreakdownMap( long createWeightTime, long createWeightStartTime ) { - final Map queryBreakdownMap = new HashMap<>(); + final Map queryBreakdownMap = new TreeMap<>(); long queryEndTime = Long.MIN_VALUE; - for (QueryTimingType queryTimingType : QueryTimingType.values()) { - final String timingTypeKey = queryTimingType.toString(); - final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; - final String sliceEndTimeForTimingType = timingTypeKey + SLICE_END_TIME_SUFFIX; - final String sliceStartTimeForTimingType = timingTypeKey + SLICE_START_TIME_SUFFIX; - - final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey; - final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey; - final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey; + + // the create weight time is computed at the query level and is called only once per query + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L); + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime); + + for (String metric : timingMetrics) { + + if (metric.equals(QueryTimingType.CREATE_WEIGHT.toString())) { + // create weight time is computed at query level and is called only once per query + continue; + } + + final String timingTypeCountKey = metric + TIMING_TYPE_COUNT_SUFFIX; + final String sliceEndTimeForTimingType = metric + SLICE_END_TIME_SUFFIX; + final String sliceStartTimeForTimingType = metric + SLICE_START_TIME_SUFFIX; + + final String maxBreakdownTypeTime = MAX_PREFIX + metric; + final String minBreakdownTypeTime = MIN_PREFIX + metric; + final String avgBreakdownTypeTime = AVG_PREFIX + metric; final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey; final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey; final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey; @@ -303,43 +343,19 @@ public Map buildQueryBreakdownMap( long queryTimingTypeStartTime = Long.MAX_VALUE; long queryTimingTypeCount = 0L; - // the create weight time is computed at the query level and is called only once per query - if (queryTimingType == QueryTimingType.CREATE_WEIGHT) { - queryBreakdownMap.put(timingTypeCountKey, 1L); - queryBreakdownMap.put(timingTypeKey, createWeightTime); - continue; - } - // for all other timing types, we will compute min/max/avg/total across slices for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { - long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L); + long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(metric, 0L); long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L); // compute max/min/avg TimingType time across slices - queryBreakdownMap.compute( - maxBreakdownTypeTime, - (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.max(sliceBreakdownTypeTime, value) - ); - queryBreakdownMap.compute( - minBreakdownTypeTime, - (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.min(sliceBreakdownTypeTime, value) - ); - queryBreakdownMap.compute( - avgBreakdownTypeTime, - (key, value) -> (value == null) ? 
sliceBreakdownTypeTime : sliceBreakdownTypeTime + value - ); - + addStatsToMap(queryBreakdownMap, maxBreakdownTypeTime, minBreakdownTypeTime, avgBreakdownTypeTime, sliceBreakdownTypeTime); // compute max/min/avg TimingType count across slices - queryBreakdownMap.compute( + addStatsToMap( + queryBreakdownMap, maxBreakdownTypeCount, - (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.max(sliceBreakdownTypeCount, value) - ); - queryBreakdownMap.compute( minBreakdownTypeCount, - (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.min(sliceBreakdownTypeCount, value) - ); - queryBreakdownMap.compute( avgBreakdownTypeCount, - (key, value) -> (value == null) ? sliceBreakdownTypeCount : sliceBreakdownTypeCount + value + sliceBreakdownTypeCount ); // only modify the start/end time of the TimingType if the slice used the timer @@ -360,7 +376,7 @@ public Map buildQueryBreakdownMap( if (queryTimingTypeCount > 0L && (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE)) { throw new OpenSearchException( "Unexpected timing type [" - + timingTypeKey + + metric + "] start [" + queryTimingTypeStartTime + "] or end time [" @@ -368,13 +384,33 @@ public Map buildQueryBreakdownMap( + "] computed across slices for profile results" ); } - queryBreakdownMap.put(timingTypeKey, (queryTimingTypeCount > 0L) ? queryTimingTypeEndTime - queryTimingTypeStartTime : 0L); + queryBreakdownMap.put(metric, (queryTimingTypeCount > 0L) ? queryTimingTypeEndTime - queryTimingTypeStartTime : 0L); queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount); queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); // compute query end time using max of query end time across all timing types queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime); } + + for (String metric : nonTimingMetrics) { + + final String maxBreakdownTypeTime = MAX_PREFIX + metric; + final String minBreakdownTypeTime = MIN_PREFIX + metric; + final String avgBreakdownTypeTime = AVG_PREFIX + metric; + + long totalBreakdownValue = 0L; + + // for all other timing types, we will compute min/max/avg/total across slices + for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { + long sliceBreakdownValue = sliceBreakdown.getValue().getOrDefault(metric, 0L); + // compute max/min/avg TimingType time across slices + addStatsToMap(queryBreakdownMap, maxBreakdownTypeTime, minBreakdownTypeTime, avgBreakdownTypeTime, sliceBreakdownValue); + totalBreakdownValue += sliceBreakdownValue; + } + queryBreakdownMap.put(metric, totalBreakdownValue); + queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + } + if (queryEndTime == Long.MIN_VALUE) { throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result"); } @@ -382,6 +418,32 @@ public Map buildQueryBreakdownMap( return queryBreakdownMap; } + private void addStatsToMap(Map queryBreakdownMap, String maxKey, String minKey, String avgKey, long sliceValue) { + queryBreakdownMap.compute(maxKey, (key, value) -> (value == null) ? sliceValue : Math.max(sliceValue, value)); + queryBreakdownMap.compute(minKey, (key, value) -> (value == null) ? 
sliceValue : Math.min(sliceValue, value)); + queryBreakdownMap.compute(avgKey, (key, value) -> (value == null) ? sliceValue : (value + sliceValue)); + } + + private Set getTimingMetrics() { + Set result = new HashSet<>(); + for (Map.Entry entry : metrics.entrySet()) { + if (entry.getValue() instanceof org.opensearch.search.profile.Timer) { + result.add(entry.getKey()); + } + } + return result; + } + + private Set getNonTimingMetrics() { + Set result = new HashSet<>(); + for (Map.Entry entry : metrics.entrySet()) { + if (!(entry.getValue() instanceof Timer)) { + result.add(entry.getKey()); + } + } + return result; + } + @Override public long toNodeTime() { return queryNodeTime; @@ -403,7 +465,7 @@ Map> getSliceCollectorsToLeaves() { } // used by tests - Map> getContexts() { + Map getContexts() { return contexts; } @@ -418,4 +480,5 @@ long getMinSliceNodeTime() { long getAvgSliceNodeTime() { return avgSliceNodeTime; } + } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java index 4e54178c3b4fb..ae549a5e9cbe2 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileMetricUtil; import org.opensearch.search.profile.ProfileResult; import java.util.List; @@ -24,15 +25,15 @@ public class ConcurrentQueryProfileTree extends AbstractQueryProfileTree { @Override - protected ContextualProfileBreakdown createProfileBreakdown() { - return new ConcurrentQueryProfileBreakdown(); + protected ContextualProfileBreakdown createProfileBreakdown() { + return new ConcurrentQueryProfileBreakdown(ProfileMetricUtil.getDefaultQueryProfileMetrics()); } @Override protected ProfileResult createProfileResult( String type, String description, - ContextualProfileBreakdown breakdown, + ContextualProfileBreakdown breakdown, List childrenProfileResults ) { assert breakdown instanceof ConcurrentQueryProfileBreakdown; @@ -62,7 +63,7 @@ protected ProfileResult createProfileResult( @Override public List getTree() { for (Integer root : roots) { - final ContextualProfileBreakdown parentBreakdown = breakdowns.get(root); + final ContextualProfileBreakdown parentBreakdown = breakdowns.get(root); assert parentBreakdown instanceof ConcurrentQueryProfileBreakdown; final Map> parentCollectorToLeaves = ((ConcurrentQueryProfileBreakdown) parentBreakdown) .getSliceCollectorsToLeaves(); @@ -82,7 +83,7 @@ private void updateCollectorToLeavesForChildBreakdowns(Integer parentToken, Map< final List children = tree.get(parentToken); if (children != null) { for (Integer currentChild : children) { - final ContextualProfileBreakdown currentChildBreakdown = breakdowns.get(currentChild); + final ContextualProfileBreakdown currentChildBreakdown = breakdowns.get(currentChild); currentChildBreakdown.associateCollectorsToLeaves(collectorToLeaves); updateCollectorToLeavesForChildBreakdowns(currentChild, collectorToLeaves); } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java index e02e562d766a0..3bce0ecd5c252 100644 --- 
a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java @@ -45,7 +45,7 @@ public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) { } @Override - public ContextualProfileBreakdown getQueryBreakdown(Query query) { + public ContextualProfileBreakdown getQueryBreakdown(Query query) { ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent( getCurrentThreadId(), k -> new ConcurrentQueryProfileTree() @@ -81,7 +81,7 @@ public List getTree() { */ @Override public void startRewriteTime() { - Timer rewriteTimer = new Timer(); + Timer rewriteTimer = new Timer("rewrite_timer"); threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer); rewriteTimer.start(); } diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java index 1ed367f094fb7..14dd0bc853fa0 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java @@ -33,6 +33,7 @@ package org.opensearch.search.profile.query; import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileMetricUtil; import org.opensearch.search.profile.ProfileResult; /** @@ -43,7 +44,7 @@ public class InternalQueryProfileTree extends AbstractQueryProfileTree { @Override - protected ContextualProfileBreakdown createProfileBreakdown() { - return new QueryProfileBreakdown(); + protected ContextualProfileBreakdown createProfileBreakdown() { + return new QueryProfileBreakdown(ProfileMetricUtil.getDefaultQueryProfileMetrics()); } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileScorer.java index 28b693ee03ad5..b78ecf4501ae1 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ProfileScorer.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileScorer.java @@ -54,7 +54,7 @@ final class ProfileScorer extends Scorer { private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer, setMinCompetitiveScoreTimer; - ProfileScorer(Scorer scorer, AbstractProfileBreakdown profile) throws IOException { + ProfileScorer(Scorer scorer, AbstractProfileBreakdown profile) throws IOException { this.scorer = scorer; scoreTimer = profile.getTimer(QueryTimingType.SCORE); nextDocTimer = profile.getTimer(QueryTimingType.NEXT_DOC); diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java index f190a9734c1a5..c200ebc6ba8f0 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java @@ -54,9 +54,9 @@ public final class ProfileWeight extends Weight { private final Weight subQueryWeight; - private final ContextualProfileBreakdown profile; + private final ContextualProfileBreakdown profile; - public ProfileWeight(Query query, Weight subQueryWeight, ContextualProfileBreakdown profile) throws IOException { + public ProfileWeight(Query query, Weight subQueryWeight, ContextualProfileBreakdown 
profile) throws IOException { super(query); this.subQueryWeight = subQueryWeight; this.profile = profile; diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileBreakdown.java index 3514a80e39d85..873c7dc22df65 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileBreakdown.java @@ -34,6 +34,10 @@ import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileMetric; + +import java.util.Collection; +import java.util.function.Supplier; /** * A record of timings for the various operations that may happen during query execution. @@ -42,15 +46,15 @@ * * @opensearch.internal */ -public final class QueryProfileBreakdown extends ContextualProfileBreakdown { +public final class QueryProfileBreakdown extends ContextualProfileBreakdown { - /** Sole constructor. */ - public QueryProfileBreakdown() { - super(QueryTimingType.class); + public QueryProfileBreakdown(Collection> metrics) { + super(metrics); } @Override - public AbstractProfileBreakdown context(Object context) { + public AbstractProfileBreakdown context(Object context) { return this; } + } diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index 78e65c5bfa257..9791f9af2035c 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -53,7 +53,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class QueryProfiler extends AbstractProfiler, Query> { +public class QueryProfiler extends AbstractProfiler { /** * The root Collector used in the search diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorArguments.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorArguments.java new file mode 100644 index 0000000000000..e7bad3db092cd --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorArguments.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Arguments for {@link QueryCollectorContextSpecRegistry} + */ +@ExperimentalApi +public final class QueryCollectorArguments { + private final boolean hasFilterCollector; + + private QueryCollectorArguments(final boolean hasFilterCollector) { + this.hasFilterCollector = hasFilterCollector; + } + + /** + * Whether the query has a filter collector. 
+ * @return true if the query has a filter collector, false otherwise + */ + public boolean hasFilterCollector() { + return hasFilterCollector; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + QueryCollectorArguments queryCollectorArguments = (QueryCollectorArguments) o; + return hasFilterCollector == queryCollectorArguments.hasFilterCollector; + } + + @Override + public int hashCode() { + return Boolean.hashCode(hasFilterCollector); + } + + /** + * {@inheritDoc} + */ + @Override + public String toString() { + return "QueryCollectorArguments[hasFilterCollector=" + hasFilterCollector + "]"; + } + + /** + * Builder for {@link QueryCollectorArguments} + */ + public static class Builder { + private boolean hasFilterCollector; + + /** + * Set the flag for query has a filter collector. + * @param hasFilterCollector true if the query has a filter collector, false otherwise + * @return Builder instance + */ + public Builder hasFilterCollector(boolean hasFilterCollector) { + this.hasFilterCollector = hasFilterCollector; + return this; + } + + /** + * Build the arguments for the query collector context spec registry. + * @return QueryCollectorArguments instance + */ + public QueryCollectorArguments build() { + return new QueryCollectorArguments(hasFilterCollector); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpec.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpec.java new file mode 100644 index 0000000000000..99379e73ed51f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpec.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * interface of QueryCollectorContextSpec + */ +@ExperimentalApi +public interface QueryCollectorContextSpec { + /** + * Context name for QueryCollectorContext + * @return string of context name + */ + String getContextName(); + + /** + * Create collector + * @param in collector + * @return collector + * @throws IOException + */ + Collector create(Collector in) throws IOException; + + /** + * Create collector manager + * @param in collector manager + * @return collector manager + * @throws IOException + */ + CollectorManager createManager(CollectorManager in) throws IOException; + + /** + * Post process query result + * @param result query result + * @throws IOException + */ + void postProcess(QuerySearchResult result) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java new file mode 100644 index 0000000000000..b08bebb840343 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecFactory.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Optional; + +/** + * interface of QueryCollectorContext spec factory + */ +@ExperimentalApi +public interface QueryCollectorContextSpecFactory { + /** + * @param searchContext context needed to create collector context spec + * @param queryCollectorArguments arguments to create collector context spec + * @return QueryCollectorContextSpec + * @throws IOException + */ + Optional createQueryCollectorContextSpec( + SearchContext searchContext, + QueryCollectorArguments queryCollectorArguments + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java new file mode 100644 index 0000000000000..413cd63b97856 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContextSpecRegistry.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * Registry class to load all collector context spec factories during cluster bootstrapping + */ +public final class QueryCollectorContextSpecRegistry { + private static final List registry = new CopyOnWriteArrayList<>(); + + private QueryCollectorContextSpecRegistry() {} + + /** + * Get all collector context spec factories + * @return list of collector context spec factories + */ + public static List getCollectorContextSpecFactories() { + return registry; + } + + /** + * Register factory + * @param factory collector context spec factory defined in plugin + */ + public static void registerFactory(QueryCollectorContextSpecFactory factory) { + registry.add(factory); + } + + /** + * Get collector context spec + * @param searchContext search context + * @param queryCollectorArguments query collector arguments + * @return collector context spec + * @throws IOException + */ + public static Optional getQueryCollectorContextSpec( + final SearchContext searchContext, + final QueryCollectorArguments queryCollectorArguments + ) throws IOException { + Iterator iterator = registry.iterator(); + while (iterator.hasNext()) { + QueryCollectorContextSpecFactory factory = iterator.next(); + Optional spec = factory.createQueryCollectorContextSpec(searchContext, queryCollectorArguments); + if (spec.isEmpty() == false) { + return spec; + } + } + return Optional.empty(); + } +} diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index 58be02cc413dd..ebf8ed0ce3362 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -76,6 +76,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.stream.Collectors; @@ -445,9 +446,37 @@ protected boolean searchWithCollector( boolean 
hasFilterCollector, boolean hasTimeout ) throws IOException { + QueryCollectorContext queryCollectorContext = getQueryCollectorContext(searchContext, hasFilterCollector); + return searchWithCollector(searchContext, searcher, query, collectors, queryCollectorContext, hasFilterCollector, hasTimeout); + } + + private QueryCollectorContext getQueryCollectorContext(SearchContext searchContext, boolean hasFilterCollector) throws IOException { // create the top docs collector last when the other collectors are known - final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector); - return searchWithCollector(searchContext, searcher, query, collectors, topDocsFactory, hasFilterCollector, hasTimeout); + final Optional queryCollectorContextOpt = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( + searchContext, + new QueryCollectorArguments.Builder().hasFilterCollector(hasFilterCollector).build() + ).map(queryCollectorContextSpec -> new QueryCollectorContext(queryCollectorContextSpec.getContextName()) { + @Override + Collector create(Collector in) throws IOException { + return queryCollectorContextSpec.create(in); + } + + @Override + CollectorManager createManager(CollectorManager in) + throws IOException { + return queryCollectorContextSpec.createManager(in); + } + + @Override + void postProcess(QuerySearchResult result) throws IOException { + queryCollectorContextSpec.postProcess(result); + } + }); + if (queryCollectorContextOpt.isPresent()) { + return queryCollectorContextOpt.get(); + } else { + return createTopDocsCollectorContext(searchContext, hasFilterCollector); + } } protected boolean searchWithCollector( diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index d60780bebf7e7..674f0e50c436a 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -450,6 +450,16 @@ private SimpleTopDocsCollectorContext( return topDocs.scoreDocs[0].score; } }; + } else if (SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) { + maxScoreSupplier = () -> { + TopDocs topDocs = topDocsSupplier.get(); + if (topDocs.scoreDocs.length == 0) { + return Float.NaN; + } else { + FieldDoc fieldDoc = (FieldDoc) topDocs.scoreDocs[0]; + return (float) fieldDoc.fields[0]; + } + }; } else if (trackMaxScore) { maxScoreCollector = new MaxScoreCollector(); maxScoreSupplier = maxScoreCollector::getMaxScore; @@ -595,8 +605,14 @@ TopDocsAndMaxScore newTopDocs(final TopDocs topDocs, final float maxScore, final newTopDocs = new TopDocs(totalHits, scoreDocs); } - if (Float.isNaN(maxScore) && newTopDocs.scoreDocs.length > 0 && sortAndFormats == null) { - return new TopDocsAndMaxScore(newTopDocs, newTopDocs.scoreDocs[0].score); + if (Float.isNaN(maxScore) && newTopDocs.scoreDocs.length > 0) { + float maxScoreFromDoc = maxScore; + if (sortAndFormats == null) { + maxScoreFromDoc = newTopDocs.scoreDocs[0].score; + } else if (SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) { + maxScoreFromDoc = (float) ((FieldDoc) newTopDocs.scoreDocs[0]).fields[0]; + } + return new TopDocsAndMaxScore(newTopDocs, maxScoreFromDoc); } else { return new TopDocsAndMaxScore(newTopDocs, maxScore); } diff --git a/server/src/main/java/org/opensearch/search/rescore/QueryRescorer.java 
b/server/src/main/java/org/opensearch/search/rescore/QueryRescorer.java index 008bff18cea5b..e95fbb292deb5 100644 --- a/server/src/main/java/org/opensearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/opensearch/search/rescore/QueryRescorer.java @@ -37,6 +37,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.opensearch.index.query.ParsedQuery; +import org.opensearch.search.rescore.QueryRescorer.QueryRescoreContext; import java.io.IOException; import java.util.Arrays; @@ -67,7 +69,7 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r final QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext; - org.apache.lucene.search.Rescorer rescorer = new org.apache.lucene.search.QueryRescorer(rescore.query()) { + org.apache.lucene.search.Rescorer rescorer = new org.apache.lucene.search.QueryRescorer(rescore.parsedQuery().query()) { @Override protected float combine(float firstPassScore, boolean secondPassMatches, float secondPassScore) { @@ -120,7 +122,7 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon prim = Explanation.noMatch("First pass did not match", sourceExplanation); } if (rescoreContext.isRescored(topLevelDocId)) { - Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); + Explanation rescoreExplain = searcher.explain(rescore.parsedQuery().query(), topLevelDocId); // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. // Maybe we should add QueryRescorer.explainCombine to Lucene? if (rescoreExplain != null && rescoreExplain.isMatch()) { @@ -190,7 +192,7 @@ private TopDocs combine(TopDocs in, TopDocs resorted, QueryRescoreContext ctx) { * @opensearch.internal */ public static class QueryRescoreContext extends RescoreContext { - private Query query; + private ParsedQuery parsedQuery; private float queryWeight = 1.0f; private float rescoreQueryWeight = 1.0f; private QueryRescoreMode scoreMode; @@ -200,17 +202,22 @@ public QueryRescoreContext(int windowSize) { this.scoreMode = QueryRescoreMode.Total; } - public void setQuery(Query query) { - this.query = query; + public void setParsedQuery(ParsedQuery parsedQuery) { + this.parsedQuery = parsedQuery; + } + + public ParsedQuery parsedQuery() { + return parsedQuery; } @Override public List getQueries() { - return Collections.singletonList(query); + return parsedQuery != null ? Collections.singletonList(parsedQuery.query()) : Collections.emptyList(); } - public Query query() { - return query; + @Override + public List getParsedQueries() { + return parsedQuery != null ? 
Collections.singletonList(parsedQuery) : Collections.emptyList(); } public float queryWeight() { diff --git a/server/src/main/java/org/opensearch/search/rescore/QueryRescorerBuilder.java b/server/src/main/java/org/opensearch/search/rescore/QueryRescorerBuilder.java index aea10755d4e42..19afb97dc9e79 100644 --- a/server/src/main/java/org/opensearch/search/rescore/QueryRescorerBuilder.java +++ b/server/src/main/java/org/opensearch/search/rescore/QueryRescorerBuilder.java @@ -190,8 +190,9 @@ public static QueryRescorerBuilder fromXContent(XContentParser parser) throws IO @Override public QueryRescoreContext innerBuildContext(int windowSize, QueryShardContext context) throws IOException { QueryRescoreContext queryRescoreContext = new QueryRescoreContext(windowSize); - // query is rewritten at this point already - queryRescoreContext.setQuery(queryBuilder.toQuery(context)); + + queryRescoreContext.setParsedQuery(context.toQuery(queryBuilder)); + queryRescoreContext.setQueryWeight(this.queryWeight); queryRescoreContext.setRescoreQueryWeight(this.rescoreQueryWeight); queryRescoreContext.setScoreMode(this.scoreMode); diff --git a/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java b/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java index d4b87f004b58f..771e12324aac4 100644 --- a/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Query; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.index.query.ParsedQuery; import java.util.Collections; import java.util.List; @@ -93,4 +94,11 @@ public Set getRescoredDocs() { public List getQueries() { return Collections.emptyList(); } + + /** + * Returns parsed queries associated with the rescorer + */ + public List getParsedQueries() { + return Collections.emptyList(); + } } diff --git a/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java b/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java index 329659b891f9f..d09f486225091 100644 --- a/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java +++ b/server/src/main/java/org/opensearch/wlm/MutableWorkloadGroupFragment.java @@ -185,7 +185,7 @@ public Map getResourceLimits() { /** * This enum models the different WorkloadGroup resiliency modes - * SOFT - means that this query group can consume more than query group resource limits if node is not in duress + * SOFT - means that this workload group can consume more than workload group resource limits if node is not in duress * ENFORCED - means that it will never breach the assigned limits and will cancel as soon as the limits are breached * MONITOR - it will not cause any cancellation but just log the eligible task cancellations */ diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java index 970844a2b59b0..a36a5a8bbfd31 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupService.java @@ -139,7 +139,7 @@ protected void doStart() { try { doRun(); } catch (Exception e) { - logger.debug("Exception occurred in Query Sandbox service", e); + logger.debug("Exception occurred in Workload Group service", e); } }, this.workloadManagementSettings.getWorkloadGroupServiceRunInterval(), ThreadPool.Names.GENERIC); } @@ -160,26 +160,26 @@ 
public void clusterChanged(ClusterChangedEvent event) { Metadata previousMetadata = event.previousState().metadata(); Metadata currentMetadata = event.state().metadata(); - // Extract the query groups from both the current and previous cluster states + // Extract the workload groups from both the current and previous cluster states Map previousWorkloadGroups = previousMetadata.workloadGroups(); Map currentWorkloadGroups = currentMetadata.workloadGroups(); - // Detect new query groups added in the current cluster state + // Detect new workload groups added in the current cluster state for (String workloadGroupName : currentWorkloadGroups.keySet()) { if (!previousWorkloadGroups.containsKey(workloadGroupName)) { - // New query group detected + // New workload group detected WorkloadGroup newWorkloadGroup = currentWorkloadGroups.get(workloadGroupName); - // Perform any necessary actions with the new query group + // Perform any necessary actions with the new workload group workloadGroupsStateAccessor.addNewWorkloadGroup(newWorkloadGroup.get_id()); } } - // Detect query groups deleted in the current cluster state + // Detect workload groups deleted in the current cluster state for (String workloadGroupName : previousWorkloadGroups.keySet()) { if (!currentWorkloadGroups.containsKey(workloadGroupName)) { - // Query group deleted + // Workload group deleted WorkloadGroup deletedWorkloadGroup = previousWorkloadGroups.get(workloadGroupName); - // Perform any necessary actions with the deleted query group + // Perform any necessary actions with the deleted workload group this.deletedWorkloadGroups.add(deletedWorkloadGroup); workloadGroupsStateAccessor.removeWorkloadGroup(deletedWorkloadGroup.get_id()); } @@ -188,13 +188,13 @@ public void clusterChanged(ClusterChangedEvent event) { } /** - * updates the failure stats for the query group + * updates the failure stats for the workload group * - * @param workloadGroupId query group identifier + * @param workloadGroupId workload group identifier */ public void incrementFailuresFor(final String workloadGroupId) { WorkloadGroupState workloadGroupState = workloadGroupsStateAccessor.getWorkloadGroupState(workloadGroupId); - // This can happen if the request failed for a deleted query group + // This can happen if the request failed for a deleted workload group // or new workloadGroup is being created and has not been acknowledged yet if (workloadGroupState == null) { return; @@ -203,7 +203,7 @@ public void incrementFailuresFor(final String workloadGroupId) { } /** - * @return node level query group stats + * @return node level workload group stats */ public WorkloadGroupStats nodeStats(Set workloadGroupIds, Boolean requestedBreached) { final Map statsHolderMap = new HashMap<>(); @@ -250,7 +250,7 @@ public boolean resourceLimitBreached(String id, WorkloadGroupState currentState) } /** - * @param workloadGroupId query group identifier + * @param workloadGroupId workload group identifier */ public void rejectIfNeeded(String workloadGroupId) { if (workloadManagementSettings.getWlmMode() != WlmMode.ENABLED) { @@ -260,8 +260,8 @@ public void rejectIfNeeded(String workloadGroupId) { if (workloadGroupId == null || workloadGroupId.equals(WorkloadGroupTask.DEFAULT_WORKLOAD_GROUP_ID_SUPPLIER.get())) return; WorkloadGroupState workloadGroupState = workloadGroupsStateAccessor.getWorkloadGroupState(workloadGroupId); - // This can happen if the request failed for a deleted query group - // or new workloadGroup is being created and has not been acknowledged yet or invalid 
query group id + // This can happen if the request failed for a deleted workload group + // or new workloadGroup is being created and has not been acknowledged yet or invalid workload group id if (workloadGroupState == null) { return; } @@ -288,11 +288,15 @@ public void rejectIfNeeded(String workloadGroupId) { if (threshold < lastRecordedUsage) { reject = true; reason.append(resourceType) - .append(" limit is breaching for ENFORCED type WorkloadGroup: (") + .append(" limit is breaching for workload group ") + .append(workloadGroup.get_id()) + .append(", ") .append(threshold) .append(" < ") .append(lastRecordedUsage) - .append("). "); + .append(", wlm mode is ") + .append(workloadGroup.getResiliencyMode()) + .append(". "); workloadGroupState.getResourceState().get(resourceType).rejections.inc(); // should not double count even if both the resource limits are breaching break; @@ -348,7 +352,7 @@ public void onTaskCompleted(Task task) { final WorkloadGroupTask workloadGroupTask = (WorkloadGroupTask) task; String workloadGroupId = workloadGroupTask.getWorkloadGroupId(); - // set the default workloadGroupId if not existing in the active query groups + // set the default workloadGroupId if not existing in the active workload groups String finalWorkloadGroupId = workloadGroupId; boolean exists = activeWorkloadGroups.stream().anyMatch(workloadGroup -> workloadGroup.get_id().equals(finalWorkloadGroupId)); diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java b/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java index 103871d1c7fe0..582730bbf0b33 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadGroupsStateAccessor.java @@ -31,7 +31,7 @@ public WorkloadGroupsStateAccessor(Map workloadGroup } /** - * returns the query groups state + * returns the workload groups state */ public Map getWorkloadGroupStateMap() { return workloadGroupStateMap; @@ -40,7 +40,7 @@ public Map getWorkloadGroupStateMap() { /** * return WorkloadGroupState for the given workloadGroupId * @param workloadGroupId - * @return WorkloadGroupState for the given workloadGroupId, if id is invalid return default query group state + * @return WorkloadGroupState for the given workloadGroupId, if id is invalid return default workload group state */ public WorkloadGroupState getWorkloadGroupState(String workloadGroupId) { return workloadGroupStateMap.getOrDefault( diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java index 35a043a6e42d8..e2a6b3538802c 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java @@ -49,13 +49,13 @@ public class WorkloadManagementSettings { ); /** - * Setting name for Query Group Service run interval + * Setting name for Workload Group Service run interval */ public static final String QUERYGROUP_ENFORCEMENT_INTERVAL_SETTING_NAME = "wlm.workload_group.enforcement_interval"; private TimeValue workloadGroupServiceRunInterval; /** - * Setting to control the run interval of Query Group Service + * Setting to control the run interval of Workload Group Service */ public static final Setting QUERYGROUP_SERVICE_RUN_INTERVAL_SETTING = Setting.longSetting( QUERYGROUP_ENFORCEMENT_INTERVAL_SETTING_NAME, diff --git 
a/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java index 78cb4521aff18..a3715eb72f385 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java +++ b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupState.java @@ -15,26 +15,26 @@ import java.util.Map; /** - * This class will keep the point in time view of the query group stats + * This class will keep the point in time view of the workload group stats */ public class WorkloadGroupState { /** - * co-ordinator level completions at the query group level, this is a cumulative counter since the Opensearch start time + * co-ordinator level completions at the workload group level, this is a cumulative counter since the OpenSearch start time */ public final CounterMetric totalCompletions = new CounterMetric(); /** - * rejections at the query group level, this is a cumulative counter since the OpenSearch start time + * rejections at the workload group level, this is a cumulative counter since the OpenSearch start time */ public final CounterMetric totalRejections = new CounterMetric(); /** - * this will track the cumulative failures in a query group + * this will track the cumulative failures in a workload group */ public final CounterMetric failures = new CounterMetric(); /** - * This will track total number of cancellations in the query group due to all resource type breaches + * This will track total number of cancellations in the workload group due to all resource type breaches */ public final CounterMetric totalCancellations = new CounterMetric(); @@ -54,7 +54,7 @@ public WorkloadGroupState() { /** * - * @return co-ordinator completions in the query group + * @return co-ordinator completions in the workload group */ public long getTotalCompletions() { return totalCompletions.count(); @@ -62,7 +62,7 @@ public long getTotalCompletions() { /** * - * @return rejections in the query group + * @return rejections in the workload group */ public long getTotalRejections() { return totalRejections.count(); @@ -70,7 +70,7 @@ public long getTotalRejections() { /** * - * @return failures in the query group + * @return failures in the workload group */ public long getFailures() { return failures.count(); @@ -81,15 +81,15 @@ public long getTotalCancellations() { } /** - * getter for query group resource state - * @return the query group resource state + * getter for workload group resource state + * @return the workload group resource state */ public Map getResourceState() { return resourceState; } /** - * This class holds the resource level stats for the query group + * This class holds the resource level stats for the workload group */ public static class ResourceTypeState { public final ResourceType resourceType; diff --git a/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java index baec9eee980ea..1174424ed398e 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java +++ b/server/src/main/java/org/opensearch/wlm/stats/WorkloadGroupStats.java @@ -87,7 +87,7 @@ public Map getStats() { } /** - * This is a stats holder object which will hold the data for a query group at a point in time + * This is a stats holder object which will hold the data for a workload group at a point in time * the instance will only be created on demand through stats api */ public static class WorkloadGroupStatsHolder implements 
ToXContentObject, Writeable { diff --git a/server/src/main/java/org/opensearch/wlm/stats/package-info.java b/server/src/main/java/org/opensearch/wlm/stats/package-info.java index 2facf8d16df22..780ba6b33aa50 100644 --- a/server/src/main/java/org/opensearch/wlm/stats/package-info.java +++ b/server/src/main/java/org/opensearch/wlm/stats/package-info.java @@ -7,6 +7,6 @@ */ /** - * Query group stats related artifacts + * Workload group stats related artifacts */ package org.opensearch.wlm.stats; diff --git a/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java index 32fc0487e4e66..01f2d1071259d 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java @@ -14,7 +14,7 @@ import java.util.List; /** - * class to help make cpu usage calculations for the query group + * class to help make cpu usage calculations for the workload group */ public class CpuUsageCalculator extends ResourceUsageCalculator { // This value should be initialised at the start time of the process and be used throughout the codebase diff --git a/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java index 6edb011f399e3..a03487f568228 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java @@ -15,7 +15,7 @@ import java.util.List; /** - * class to help make memory usage calculations for the query group + * class to help make memory usage calculations for the workload group */ public class MemoryUsageCalculator extends ResourceUsageCalculator { public static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); diff --git a/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java index 7ec1f531c8920..6766ca57c38d1 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java @@ -14,14 +14,14 @@ import java.util.List; /** - * This class is used to track query group level resource usage + * This class is used to track workload group level resource usage */ @PublicApi(since = "2.18.0") public abstract class ResourceUsageCalculator { /** - * calculates the current resource usage for the query group + * calculates the current resource usage for the workload group * - * @param tasks list of tasks in the query group + * @param tasks list of tasks in the workload group */ public abstract double calculateResourceUsage(List tasks); diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index 4577dcc6a66f0..61912e7b4891c 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -96,7 +96,7 @@ public void testVersionComparison() { public void testMin() { assertEquals(VersionUtils.getPreviousVersion(), Version.min(Version.CURRENT, VersionUtils.getPreviousVersion())); - assertEquals(LegacyESVersion.fromString("7.0.1"), Version.min(LegacyESVersion.fromString("7.0.1"), Version.CURRENT)); + assertEquals(Version.fromString("7.0.1"), Version.min(Version.fromString("7.0.1"), 
Version.CURRENT)); Version version = VersionUtils.randomVersion(random()); Version version1 = VersionUtils.randomVersion(random()); if (version.id <= version1.id) { @@ -189,12 +189,12 @@ public void testIndexCreatedVersion() { } public void testMinCompatVersion() { - Version major = LegacyESVersion.fromString("6.8.0"); + Version major = Version.fromString("6.8.0"); assertThat(Version.fromString("1.0.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("1.2.0").minimumCompatibilityVersion(), equalTo(major)); assertThat(Version.fromString("1.3.0").minimumCompatibilityVersion(), equalTo(major)); - Version major2x = LegacyESVersion.fromString("7.10.0"); + Version major2x = Version.fromString("7.10.0"); assertThat(Version.fromString("2.0.0").minimumCompatibilityVersion(), equalTo(major2x)); assertThat(Version.fromString("2.2.0").minimumCompatibilityVersion(), equalTo(major2x)); assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major2x)); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java index c620b03042007..3b20af8446704 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterModuleTests.java @@ -32,6 +32,8 @@ package org.opensearch.cluster; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.WorkloadGroupMetadata; @@ -90,7 +92,7 @@ import java.util.function.Supplier; public class ClusterModuleTests extends ModuleTestCase { - private ClusterInfoService clusterInfoService = EmptyClusterInfoService.INSTANCE; + private final ClusterInfoService clusterInfoService = EmptyClusterInfoService.INSTANCE; private ClusterService clusterService; private ThreadContext threadContext; @@ -127,6 +129,71 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } } + static class FakeExpressionResolver implements IndexNameExpressionResolver.ExpressionResolver { + @Override + public List resolve(IndexNameExpressionResolver.Context context, List expressions) { + throw new UnsupportedOperationException("resolve operation not supported on FakeExpressionResolver"); + } + } + + static class AnotherFakeExpressionResolver implements IndexNameExpressionResolver.ExpressionResolver { + @Override + public List resolve(IndexNameExpressionResolver.Context context, List expressions) { + throw new UnsupportedOperationException("resolve operation not supported on FakeExpressionResolver"); + } + } + + public void testRegisterCustomExpressionResolver() { + FakeExpressionResolver customResolver1 = new FakeExpressionResolver(); + AnotherFakeExpressionResolver customResolver2 = new AnotherFakeExpressionResolver(); + List clusterPlugins = Collections.singletonList(new ClusterPlugin() { + @Override + public Collection getIndexNameCustomResolvers() { + return Arrays.asList(customResolver1, customResolver2); + } + }); + ClusterModule module = new ClusterModule( + Settings.EMPTY, + clusterService, + clusterPlugins, + clusterInfoService, + null, + threadContext, + null, + ShardStateAction.class + ); + assertTrue(module.getIndexNameExpressionResolver().getExpressionResolvers().contains(customResolver1)); + 
assertTrue(module.getIndexNameExpressionResolver().getExpressionResolvers().contains(customResolver2)); + } + + public void testRegisterCustomExpressionResolverDuplicate() { + FakeExpressionResolver customResolver1 = new FakeExpressionResolver(); + FakeExpressionResolver customResolver2 = new FakeExpressionResolver(); + List clusterPlugins = Collections.singletonList(new ClusterPlugin() { + @Override + public Collection getIndexNameCustomResolvers() { + return Arrays.asList(customResolver1, customResolver2); + } + }); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> new ClusterModule( + Settings.EMPTY, + clusterService, + clusterPlugins, + clusterInfoService, + null, + threadContext, + null, + ShardStateAction.class + ) + ); + assertEquals( + "Cannot specify expression resolver [org.opensearch.cluster.ClusterModuleTests$FakeExpressionResolver] twice", + ex.getMessage() + ); + } + public void testRegisterClusterDynamicSettingDuplicate() { try { new SettingsModule(Settings.EMPTY, EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING); @@ -173,7 +240,7 @@ public void testRegisterAllocationDeciderDuplicate() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings)); } - }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)) + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), ShardStateAction.class) ); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -184,7 +251,7 @@ public void testRegisterAllocationDecider() { public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider()); } - }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), ShardStateAction.class); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } @@ -194,7 +261,7 @@ private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, Str public Map> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonMap(name, supplier); } - }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + }), clusterInfoService, null, threadContext, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), ShardStateAction.class); } public void testRegisterShardsAllocator() { @@ -222,7 +289,8 @@ public void testUnknownShardsAllocator() { clusterInfoService, null, threadContext, - new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), + ShardStateAction.class ) ); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); @@ -310,7 +378,8 @@ public void testRejectsReservedExistingShardsAllocatorName() { clusterInfoService, null, threadContext, - new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), + ShardStateAction.class ); expectThrows( IllegalArgumentException.class, @@ -326,7 +395,8 @@ public void testRejectsDuplicateExistingShardsAllocatorName() { clusterInfoService, null, 
threadContext, - new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), + ShardStateAction.class ); expectThrows( IllegalArgumentException.class, @@ -359,7 +429,8 @@ public void testRerouteServiceSetForBalancedShardsAllocator() { clusterInfoService, null, threadContext, - new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), + ShardStateAction.class ); clusterModule.setRerouteServiceForAllocator((reason, priority, listener) -> listener.onResponse(clusterService.state())); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index ab455a1fbb4e7..f868c6cb9b0df 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.coordination; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -137,7 +136,7 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final DiscoveryNode tooLowJoiningNode = new DiscoveryNode( UUIDs.base64UUID(), buildNewFakeTransportAddress(), - LegacyESVersion.fromString("6.7.0") + Version.fromString("6.7.0") ); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java index fda2f411b1994..c2caae1dc2586 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata.State; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver.ExpressionResolver; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; @@ -2520,6 +2521,20 @@ public void testDataStreamsNames() { assertThat(names, empty()); } + static class FakeExpressionResolver implements IndexNameExpressionResolver.ExpressionResolver { + @Override + public List resolve(IndexNameExpressionResolver.Context context, List expressions) { + throw new UnsupportedOperationException("resolve operation not supported on FakeExpressionResolver"); + } + } + + public void testAddCustomResolver() { + FakeExpressionResolver customResolver = new FakeExpressionResolver(); + List customResolvers = Collections.singletonList(customResolver); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(threadContext, customResolvers); + assertEquals(indexNameExpressionResolver.getExpressionResolvers().get(0), customResolver); + } + private ClusterState systemIndexTestClusterState() { Settings settings = Settings.builder().build(); Metadata.Builder mdBuilder = Metadata.builder() diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeServiceTests.java 
index cee95323b8129..6ef1b096960ee 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexUpgradeServiceTests.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; @@ -131,7 +130,7 @@ public void testFailUpgrade() { minCompat, Version.max(minCompat, VersionUtils.getPreviousVersion(Version.CURRENT)) ); - Version indexCreated = LegacyESVersion.fromString((minCompat.major - 1) + "." + randomInt(5) + "." + randomInt(5)); + Version indexCreated = Version.fromString((minCompat.major - 1) + "." + randomInt(5) + "." + randomInt(5)); final IndexMetadata metadata = newIndexMeta( "foo", Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index 61b86856c9ebc..ceb0c6f1c675b 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -34,7 +34,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Setting; @@ -493,7 +492,7 @@ public void testMaxMinNodeVersion() { buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)), - LegacyESVersion.fromString("5.1.0") + Version.fromString("5.1.0") ) ); discoBuilder.add( @@ -503,7 +502,7 @@ public void testMaxMinNodeVersion() { buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)), - LegacyESVersion.fromString("6.3.0") + Version.fromString("6.3.0") ) ); discoBuilder.add( @@ -520,7 +519,7 @@ public void testMaxMinNodeVersion() { discoBuilder.clusterManagerNodeId("name_2"); DiscoveryNodes build = discoBuilder.build(); assertEquals(Version.fromString("1.1.0"), build.getMaxNodeVersion()); - assertEquals(LegacyESVersion.fromString("5.1.0"), build.getMinNodeVersion()); + assertEquals(Version.fromString("5.1.0"), build.getMinNodeVersion()); } private DiscoveryNode buildDiscoveryNodeFromExisting(DiscoveryNode existing, Version newVersion) { diff --git a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java index 4a68918587c23..d995a60adfe8d 100644 --- a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -220,7 +219,7 @@ public void testLuceneVersionOnUnknownVersions() { assertEquals(VersionUtils.getPreviousVersion(Version.fromString("2.1.3")).luceneVersion, version.luceneVersion); // too old version, major should be the oldest supported lucene version minus 1 - version = LegacyESVersion.fromString("5.2.1"); + 
version = Version.fromString("5.2.1"); assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today diff --git a/server/src/test/java/org/opensearch/common/util/TimeBasedCacheExpiryTrackerTests.java b/server/src/test/java/org/opensearch/common/util/TimeBasedCacheExpiryTrackerTests.java new file mode 100644 index 0000000000000..c3c6ad275c7f1 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/util/TimeBasedCacheExpiryTrackerTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.function.LongSupplier; + +public class TimeBasedCacheExpiryTrackerTests extends OpenSearchTestCase { + TimeBasedExpiryTracker sut; + final long ONE_SEC = 1_000_000_000; + + public void testExpiryEvent() { + TestTimeSupplier testTimeSupplier = new TestTimeSupplier(); + sut = new TimeBasedExpiryTracker(testTimeSupplier); + + testTimeSupplier.advanceClockBy(2 * ONE_SEC); + assertTrue(sut.getAsBoolean()); + } + + public void testNonExpiryEvent() { + TestTimeSupplier testTimeSupplier = new TestTimeSupplier(); + sut = new TimeBasedExpiryTracker(testTimeSupplier); + + testTimeSupplier.advanceClockBy(ONE_SEC / 2); + assertFalse(sut.getAsBoolean()); + } + + public static class TestTimeSupplier implements LongSupplier { + long currentTime = System.nanoTime(); + + @Override + public long getAsLong() { + return currentTime; + } + + public void advanceClockBy(long nanos) { + currentTime += nanos; + } + } +} diff --git a/server/src/test/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerTests.java b/server/src/test/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerTests.java index 5a8d7fc16ad3e..8f33c3534423a 100644 --- a/server/src/test/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerTests.java +++ b/server/src/test/java/org/opensearch/index/autoforcemerge/AutoForceMergeManagerTests.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -176,7 +177,6 @@ public void testConfigurationValidatorWithNonDataNode() { // NodeValidator Tests public void testNodeValidatorWithHealthyResources() { when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); when(jvm.getHeapUsedPercent()).thenReturn((short) 60); ThreadPoolStats stats = new ThreadPoolStats( Arrays.asList(new ThreadPoolStats.Stats( @@ -191,9 +191,32 @@ public void testNodeValidatorWithHealthyResources() { autoForceMergeManager.close(); } + public void testNodeValidatorWithFeatureSwitch() { + when(cpu.getPercent()).thenReturn((short) 50); + when(jvm.getHeapUsedPercent()).thenReturn((short) 60); + ThreadPoolStats stats = new ThreadPoolStats( + Arrays.asList(new ThreadPoolStats.Stats( + ThreadPool.Names.FORCE_MERGE, 1, 0, 0, 0, 1, 0, 0 + )) + ); + when(threadPool.stats()).thenReturn(stats); + Settings settings = 
getConfiguredClusterSettings(false, false, Collections.emptyMap()); + AutoForceMergeManager autoForceMergeManager = clusterSetupWithNode(settings, getNodeWithRoles(DATA_NODE_1, Set.of(DiscoveryNodeRole.DATA_ROLE))); + autoForceMergeManager.start(); + assertFalse(autoForceMergeManager.getConfigurationValidator().validate().isAllowed()); + assertNotEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.cpuOneMinute.lifecycleState()); + assertNotEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.cpuFiveMinute.lifecycleState()); + assertNotEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.jvmOneMinute.lifecycleState()); + assertNotEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.jvmFiveMinute.lifecycleState()); + assertTrue(autoForceMergeManager.getNodeValidator().validate().isAllowed()); + assertEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.cpuOneMinute.lifecycleState()); + assertEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.cpuFiveMinute.lifecycleState()); + assertEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.jvmOneMinute.lifecycleState()); + assertEquals(Lifecycle.State.STARTED, ResourceTrackerProvider.resourceTrackers.jvmFiveMinute.lifecycleState()); + autoForceMergeManager.close(); + } + public void testNodeValidatorWithHighCPU() { - when(cpu.getPercent()).thenReturn((short) 95); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); DiscoveryNode dataNode1 = getNodeWithRoles(DATA_NODE_1, Set.of(DiscoveryNodeRole.DATA_ROLE)); DiscoveryNode warmNode1 = getNodeWithRoles(WARM_NODE_1, Set.of(DiscoveryNodeRole.WARM_ROLE)); ClusterState clusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) @@ -207,20 +230,24 @@ .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); when(clusterService.state()).thenReturn(clusterState); - AutoForceMergeManager autoForceMergeManager = clusterSetupWithNode(getConfiguredClusterSettings(true, true, Collections.emptyMap()), dataNode1); + AutoForceMergeManager autoForceMergeManager = clusterSetupWithNode( + getConfiguredClusterSettings(true, true, Collections.emptyMap()), + dataNode1 + ); autoForceMergeManager.start(); + when(cpu.getPercent()).thenReturn((short) 95); assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); - when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.9 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); + for (int i = 0; i < 10; i++) + ResourceTrackerProvider.resourceTrackers.cpuOneMinute.recordUsage(90); assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.9 * allocatedProcessors, 0.5 * allocatedProcessors}); + for (int i = 0; i < 10; i++) + ResourceTrackerProvider.resourceTrackers.cpuFiveMinute.recordUsage(90); assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); autoForceMergeManager.close(); } public void testNodeValidatorWithHighDiskUsage() { when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); when(disk.getAvailable()).thenReturn(new ByteSizeValue(5)); 
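// The 5-byte available-disk stub above is what drives this test: the node validator's disk check is expected to reject, consistent with the 85.0 default disk threshold asserted in ForceMergeManagerSettingsTests later in this diff.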
AutoForceMergeManager autoForceMergeManager = clusterSetupWithNode(getConfiguredClusterSettings(true, true, Collections.emptyMap()), getNodeWithRoles(DATA_NODE_1, Set.of(DiscoveryNodeRole.DATA_ROLE))); autoForceMergeManager.start(); @@ -230,17 +257,21 @@ public void testNodeValidatorWithHighDiskUsage() { public void testNodeValidatorWithHighJVMUsage() { when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); - when(jvm.getHeapUsedPercent()).thenReturn((short) 90); AutoForceMergeManager autoForceMergeManager = clusterSetupWithNode(getConfiguredClusterSettings(true, true, Collections.emptyMap()), getNodeWithRoles(DATA_NODE_1, Set.of(DiscoveryNodeRole.DATA_ROLE))); autoForceMergeManager.start(); + when(jvm.getHeapUsedPercent()).thenReturn((short) 90); + assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); + for(int i = 0; i < 10; i++) + ResourceTrackerProvider.resourceTrackers.jvmOneMinute.recordUsage(90); + assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); + for(int i = 0; i < 10; i++) + ResourceTrackerProvider.resourceTrackers.jvmFiveMinute.recordUsage(90); assertFalse(autoForceMergeManager.getNodeValidator().validate().isAllowed()); autoForceMergeManager.close(); } public void testNodeValidatorWithInsufficientForceMergeThreads() { when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn(new double[]{0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors}); when(jvm.getHeapUsedPercent()).thenReturn((short) 50); ThreadPoolStats stats = new ThreadPoolStats( Arrays.asList(new ThreadPoolStats.Stats( @@ -376,9 +407,6 @@ public void testForceMergeOperationOnDataNodeWithFailingMerges() throws IOExcept .build(); when(clusterService.state()).thenReturn(clusterState); when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn( - new double[] { 0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors } - ); when(jvm.getHeapUsedPercent()).thenReturn((short) 50); int forceMergeThreads = 4; @@ -430,9 +458,6 @@ public void testForceMergeOperationOnDataNodeOfWarmEnabledCluster() throws IOExc .build(); when(clusterService.state()).thenReturn(clusterState); when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn( - new double[] { 0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors } - ); when(jvm.getHeapUsedPercent()).thenReturn((short) 50); int forceMergeThreads = 4; ExecutorService executorService = Executors.newFixedThreadPool(forceMergeThreads); @@ -488,9 +513,6 @@ public void testForceMergeOperationOnDataNodeWithThreadInterruption() throws Int .build(); when(clusterService.state()).thenReturn(clusterState); when(cpu.getPercent()).thenReturn((short) 50); - when(cpu.getLoadAverage()).thenReturn( - new double[] { 0.7 * allocatedProcessors, 0.6 * allocatedProcessors, 0.5 * allocatedProcessors } - ); when(jvm.getHeapUsedPercent()).thenReturn((short) 50); int forceMergeThreads = 4; diff --git a/server/src/test/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettingsTests.java b/server/src/test/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettingsTests.java index cb3c008f0de08..7aded4023670a 100644 --- a/server/src/test/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettingsTests.java +++ 
b/server/src/test/java/org/opensearch/index/autoforcemerge/ForceMergeManagerSettingsTests.java @@ -61,12 +61,13 @@ public void accept(TimeValue timeValue) { public void testDefaultSettings() { assertEquals(false, forceMergeManagerSettings.isAutoForceMergeFeatureEnabled()); - assertEquals(forceMergeManagerSettings.getForcemergeDelay(), TimeValue.timeValueSeconds(10)); + assertEquals(forceMergeManagerSettings.getForcemergeDelay(), TimeValue.timeValueSeconds(15)); assertEquals(forceMergeManagerSettings.getSchedulerInterval(), TimeValue.timeValueMinutes(30)); assertEquals(2, (int) forceMergeManagerSettings.getConcurrencyMultiplier()); assertEquals(1, (int) forceMergeManagerSettings.getSegmentCount()); - assertEquals(80.0, forceMergeManagerSettings.getCpuThreshold(), 0.0); + assertEquals(75.0, forceMergeManagerSettings.getCpuThreshold(), 0.0); assertEquals(75.0, forceMergeManagerSettings.getJvmThreshold(), 0.0); + assertEquals(85.0, forceMergeManagerSettings.getDiskThreshold(), 0.0); } public void testDynamicSettingsUpdate() { @@ -100,14 +101,14 @@ public void testTimeValueSettings() { Settings newSettings = Settings.builder() .put(ForceMergeManagerSettings.AUTO_FORCE_MERGE_SCHEDULER_INTERVAL.getKey(), "10m") .put(ForceMergeManagerSettings.TRANSLOG_AGE_AUTO_FORCE_MERGE.getKey(), "10m") - .put(ForceMergeManagerSettings.MERGE_DELAY_BETWEEN_SHARDS_FOR_AUTO_FORCE_MERGE.getKey(), "15s") + .put(ForceMergeManagerSettings.MERGE_DELAY_BETWEEN_SHARDS_FOR_AUTO_FORCE_MERGE.getKey(), "20s") .build(); clusterSettings.applySettings(newSettings); assertEquals(forceMergeManagerSettings.getSchedulerInterval(), TimeValue.timeValueMinutes(10)); assertEquals(forceMergeManagerSettings.getTranslogAge(), TimeValue.timeValueMinutes(10)); - assertEquals(forceMergeManagerSettings.getForcemergeDelay(), TimeValue.timeValueSeconds(15)); + assertEquals(forceMergeManagerSettings.getForcemergeDelay(), TimeValue.timeValueSeconds(20)); } public void testThreadSettings() { diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index 6382f033d74d5..5fc771a422c01 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -411,10 +411,20 @@ public void testLongRangeQuery() { public void testUnsignedLongRangeQuery() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.UNSIGNED_LONG); - Query expected = new IndexOrDocValuesQuery( + Query indexOrDvQuery = new IndexOrDocValuesQuery( BigIntegerPoint.newRangeQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)), SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)) ); + Query expected = new ApproximateScoreQuery( + indexOrDvQuery, + new ApproximatePointRangeQuery( + "field", + NumberType.UNSIGNED_LONG.encodePoint(BigInteger.valueOf(1)), + NumberType.UNSIGNED_LONG.encodePoint(BigInteger.valueOf(3)), + 1, + ApproximatePointRangeQuery.UNSIGNED_LONG_FORMAT + ) + ); assertEquals(expected, ft.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC)); MappedFieldType unsearchable = unsearchable(); @@ -443,7 +453,7 @@ public void testUnsignedLongTermsQuery() { public void testDoubleRangeQuery() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.DOUBLE); - Query expected = new IndexOrDocValuesQuery( + Query 
indexOrDvQuery = new IndexOrDocValuesQuery( DoublePoint.newRangeQuery("field", 1d, 3d), SortedNumericDocValuesField.newSlowRangeQuery( "field", @@ -451,6 +461,16 @@ NumericUtils.doubleToSortableLong(3) ) ); + Query expected = new ApproximateScoreQuery( + indexOrDvQuery, + new ApproximatePointRangeQuery( + "field", + DoublePoint.pack(new double[] { 1d }).bytes, + DoublePoint.pack(new double[] { 3d }).bytes, + 1, + ApproximatePointRangeQuery.DOUBLE_FORMAT + ) + ); assertEquals(expected, ft.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC)); MappedFieldType unsearchable = unsearchable(); diff --git a/server/src/test/java/org/opensearch/index/mapper/SemanticVersionFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/SemanticVersionFieldMapperTests.java new file mode 100644 index 0000000000000..35bc16c512bc6 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/SemanticVersionFieldMapperTests.java @@ -0,0 +1,854 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.MultiReader; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.automaton.Operations; +import org.apache.lucene.util.automaton.RegExp; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; + +/** + * Tests for the OpenSearch semantic version field type mapper + */ +public class SemanticVersionFieldMapperTests extends MapperTestCase { + + @Override + protected void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", "version"); + b.field("store", true); + b.field("index", true); + } + + @Override + protected void registerParameters(ParameterChecker checker) 
{ + // No additional parameters to register for version field type + } + + @Override + protected void metaMapping(XContentBuilder b) throws IOException { + minimalMapping(b); + } + + @Override + protected void writeFieldValue(XContentBuilder builder) throws IOException { + builder.value("1.0.0"); + } + + public void testDefaults() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.0.0"))); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertThat(fields.length, greaterThanOrEqualTo(2)); + + boolean hasDocValues = false; + boolean hasStoredField = false; + for (IndexableField field : fields) { + if (field.fieldType().docValuesType() == DocValuesType.SORTED_SET) { + hasDocValues = true; + } + if (field.fieldType().stored()) { + hasStoredField = true; + } + } + + assertTrue("Field should have doc values", hasDocValues); + assertTrue("Field should be stored", hasStoredField); + } + + public void testValidVersionValues() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test regular version + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.2.3"))); + IndexableField field = doc.rootDoc().getField("field"); + assertThat(field.stringValue(), equalTo("1.2.3")); + + // Test version with pre-release + doc = mapper.parse(source(b -> b.field("field", "1.2.3-alpha"))); + field = doc.rootDoc().getField("field"); + assertThat(field.stringValue(), equalTo("1.2.3-alpha")); + + // Test version with build metadata + doc = mapper.parse(source(b -> b.field("field", "1.2.3+build.123"))); + field = doc.rootDoc().getField("field"); + assertThat(field.stringValue(), equalTo("1.2.3+build.123")); + + // Test version with both pre-release and build metadata + doc = mapper.parse(source(b -> b.field("field", "1.2.3-alpha+build.123"))); + field = doc.rootDoc().getField("field"); + assertThat(field.stringValue(), equalTo("1.2.3-alpha+build.123")); + } + + public void testStoredValues() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test storing different version formats + List versions = Arrays.asList( + "1.0.0", + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0+build.123", + "1.0.0-beta+build.123", + "999999999.999999999.999999999" + ); + + for (String version : versions) { + ParsedDocument doc = mapper.parse(source(b -> b.field("field", version))); + IndexableField storedField = doc.rootDoc().getField("field"); + assertNotNull("Stored field should exist for " + version, storedField); + assertEquals("Stored value should match input for " + version, version, storedField.stringValue()); + } + } + + public void testDocValuesSorting() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.0.0"))); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + boolean hasDocValues = false; + for (IndexableField field : fields) { + if (field.fieldType().docValuesType() == DocValuesType.SORTED_SET) { + hasDocValues = true; + assertEquals(DocValuesType.SORTED_SET, field.fieldType().docValuesType()); + } + } + assertTrue("Field should have sorted doc values", hasDocValues); + } + + public void testNullHandling() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test null value + 
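// A null value yields no indexed, stored, or doc-values fields for this mapper, which is exactly what the arrayWithSize(0) assertions below verify.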
ParsedDocument doc = mapper.parse(source(b -> b.nullField("field"))); + assertThat(doc.rootDoc().getFields("field"), arrayWithSize(0)); + + // Test missing field + doc = mapper.parse(source(b -> {})); + assertThat(doc.rootDoc().getFields("field"), arrayWithSize(0)); + } + + public void testMalformedVersions() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test various malformed versions + List malformedVersions = Arrays.asList( + "1", + "1.0", + "v1.0.0", + "1.0.0.0", + "-1.0.0", + "1.0.0-", + "1.0.0+", + "01.0.0", + "1.0.0@invalid" + ); + + for (String malformed : malformedVersions) { + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> mapper.parse(source(b -> b.field("field", malformed))) + ); + assertTrue(e.getCause().getMessage().contains("Invalid semantic version format")); + } + } + + public void testMetadataFields() throws Exception { + XContentBuilder mapping = fieldMapping(this::minimalMapping); + DocumentMapper mapper = createDocumentMapper(mapping); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.0.0"))); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + boolean hasDocValues = false; + for (IndexableField field : fields) { + if (field.fieldType().docValuesType() == DocValuesType.SORTED_SET) { + hasDocValues = true; + break; + } + } + assertTrue("Field should have doc values", hasDocValues); + } + + public void testToXContent() throws IOException { + XContentBuilder mapping = fieldMapping(this::minimalMapping); + DocumentMapper mapper = createDocumentMapper(mapping); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + mapper.mapping().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String mappingString = builder.toString(); + assertTrue(mappingString.contains("\"type\":\"version\"")); + } + + public void testMultipleVersionFields() throws Exception { + XContentBuilder mapping = mapping(b -> { + b.startObject("version1").field("type", "version").field("store", true).endObject(); + b.startObject("version2").field("type", "version").field("store", true).endObject(); + }); + + DocumentMapper mapper = createDocumentMapper(mapping); + ParsedDocument doc = mapper.parse(source(b -> { + b.field("version1", "1.0.0"); + b.field("version2", "2.0.0"); + })); + + assertNotNull(doc.rootDoc().getField("version1")); + assertNotNull(doc.rootDoc().getField("version2")); + assertNotEquals(doc.rootDoc().getField("version1").stringValue(), doc.rootDoc().getField("version2").stringValue()); + } + + public void testMajorVersionComparison() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + ParsedDocument doc1 = mapper.parse(source(b -> b.field("field", "2.0.0"))); + ParsedDocument doc2 = mapper.parse(source(b -> b.field("field", "1.9.9"))); + + SemanticVersion v1 = SemanticVersion.parse(doc1.rootDoc().getField("field").stringValue()); + SemanticVersion v2 = SemanticVersion.parse(doc2.rootDoc().getField("field").stringValue()); + + assertTrue("Major version comparison failed", v1.compareTo(v2) > 0); + } + + public void testComplexPreReleaseComparison() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + List versions = Arrays.asList( + "1.0.0-alpha.beta", + "1.0.0-alpha.1", + "1.0.0-alpha", + "1.0.0-alpha.beta.2", + "1.0.0-beta", + "1.0.0-alpha.12", + "1.0.0-beta.2", + "1.0.0" + ); + + List expected = 
Arrays.asList( + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-alpha.12", + "1.0.0-alpha.beta", + "1.0.0-alpha.beta.2", + "1.0.0-beta", + "1.0.0-beta.2", + "1.0.0" + ); + + testVersionSorting(mapper, versions, expected); + } + + public void testBuildMetadataEquality() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + ParsedDocument doc1 = mapper.parse(source(b -> b.field("field", "1.0.0+build.1"))); + ParsedDocument doc2 = mapper.parse(source(b -> b.field("field", "1.0.0+build.2"))); + + SemanticVersion v1 = SemanticVersion.parse(doc1.rootDoc().getField("field").stringValue()); + SemanticVersion v2 = SemanticVersion.parse(doc2.rootDoc().getField("field").stringValue()); + + assertEquals("Build metadata should not affect equality", 0, v1.compareTo(v2)); + } + + public void testMultipleFieldsInDocument() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { + b.startObject("version1").field("type", "version").field("store", true).endObject(); + b.startObject("version2").field("type", "version").field("store", true).endObject(); + })); + + ParsedDocument doc = mapper.parse(source(b -> { + b.field("version1", "1.0.0"); + b.field("version2", "2.0.0"); + })); + + assertNotNull(doc.rootDoc().getField("version1")); + assertNotNull(doc.rootDoc().getField("version2")); + assertNotEquals(doc.rootDoc().getField("version1").stringValue(), doc.rootDoc().getField("version2").stringValue()); + } + + public void testExtremeVersionNumbers() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test very large version numbers + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "999999999.999999999.999999999"))); + assertNotNull(doc.rootDoc().getField("field")); + + // Test version with many pre-release parts + doc = mapper.parse(source(b -> b.field("field", "1.0.0-alpha.beta.gamma.delta.epsilon"))); + assertNotNull(doc.rootDoc().getField("field")); + + // Test version with many build metadata parts + doc = mapper.parse(source(b -> b.field("field", "1.0.0+build.123.456.789.abc.def"))); + assertNotNull(doc.rootDoc().getField("field")); + } + + public void testMoreInvalidVersions() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test empty string + expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "")))); + + // Test only dots + expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "...")))); + + // Test invalid characters + expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "1.0.0@")))); + + // Test leading zeros + expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "01.2.3")))); + } + + private void testVersionSorting(DocumentMapper mapper, List input, List expected) throws Exception { + List actual = input.stream().map(v -> { + try { + ParsedDocument doc = mapper.parse(source(b -> b.field("field", v))); + return doc.rootDoc().getField("field").stringValue(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).sorted((v1, v2) -> { + SemanticVersion sv1 = SemanticVersion.parse(v1); + SemanticVersion sv2 = SemanticVersion.parse(v2); + return sv1.compareTo(sv2); + }).collect(Collectors.toList()); + + assertThat(actual, contains(expected.toArray())); + } + + public void testInvalidVersionValues() throws IOException { + DocumentMapper 
mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Test invalid version format + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "1.2")))); + assertTrue(e.getCause().getMessage().contains("Invalid semantic version format")); + + // Test negative version numbers + e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "-1.2.3")))); + assertTrue(e.getCause().getMessage().contains("Invalid semantic version format")); + + // Test invalid pre-release format + e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field", "1.2.3-")))); + assertTrue(e.getCause().getMessage().contains("Invalid semantic version format")); + } + + public void testVersionSorting() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + + // Create a list of versions in random order + List versions = Arrays.asList( + "1.0.0", + "1.10.0", + "1.2.0", + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-beta", + "1.0.0-beta.2", + "1.0.0-beta.11", + "1.0.0-rc.1", + "1.0.0+build.123" + ); + Collections.shuffle(versions, random()); + + // Store documents with versions + List docs = new ArrayList<>(); + for (String version : versions) { + ParsedDocument doc = mapper.parse(source(b -> b.field("field", version))); + docs.add(doc); + } + + // Extract and sort versions + List sortedVersions = docs.stream().map(doc -> { + for (IndexableField field : doc.rootDoc().getFields("field")) { + if (field.fieldType().stored()) { + return field.stringValue(); + } + } + return null; + }).filter(Objects::nonNull).sorted((v1, v2) -> { + SemanticVersion sv1 = SemanticVersion.parse(v1); + SemanticVersion sv2 = SemanticVersion.parse(v2); + return sv1.compareTo(sv2); + }).map(v -> { + // Normalize version by removing build metadata + SemanticVersion sv = SemanticVersion.parse(v); + return new SemanticVersion(sv.getMajor(), sv.getMinor(), sv.getPatch(), sv.getPreRelease(), null).toString(); + }).collect(Collectors.toList()); + + // Verify each version individually + assertEquals("Wrong number of versions", 10, sortedVersions.size()); + assertEquals("1.0.0-alpha", sortedVersions.get(0)); + assertEquals("1.0.0-alpha.1", sortedVersions.get(1)); + assertEquals("1.0.0-beta", sortedVersions.get(2)); + assertEquals("1.0.0-beta.2", sortedVersions.get(3)); + assertEquals("1.0.0-beta.11", sortedVersions.get(4)); + assertEquals("1.0.0-rc.1", sortedVersions.get(5)); + assertEquals("1.0.0", sortedVersions.get(6)); + assertEquals("1.0.0", sortedVersions.get(7)); // build.123 is stripped + assertEquals("1.2.0", sortedVersions.get(8)); + assertEquals("1.10.0", sortedVersions.get(9)); + } + + public void testDocValues() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.0.0"))); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + boolean hasDocValues = false; + for (IndexableField field : fields) { + if (field.fieldType().docValuesType() == DocValuesType.SORTED_SET) { + hasDocValues = true; + break; + } + } + assertTrue("Field should have doc values", hasDocValues); + } + + public void testTermQuery() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1.0.0"))); + + IndexableField[] fields = 
doc.rootDoc().getFields("field"); + boolean hasTermField = false; + for (IndexableField field : fields) { + if (field instanceof KeywordField) { + hasTermField = true; + break; + } + } + assertTrue("Field should have keyword term field", hasTermField); + } + + public void testNullValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse(source(b -> b.nullField("field"))); + assertThat(doc.rootDoc().getFields("field"), arrayWithSize(0)); + } + + public void testRangeQueries() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + + // Test various range scenarios + Query rangeQuery1 = fieldType.rangeQuery("1.0.0", "2.0.0", true, true, ShapeRelation.INTERSECTS, null, null, null); + assertTrue( + rangeQuery1 instanceof TermRangeQuery + || (rangeQuery1 instanceof IndexOrDocValuesQuery + && ((IndexOrDocValuesQuery) rangeQuery1).getIndexQuery() instanceof TermRangeQuery) + ); + + Query rangeQuery2 = fieldType.rangeQuery("1.0.0-alpha", "1.0.0", true, true, ShapeRelation.INTERSECTS, null, null, null); + assertTrue( + rangeQuery2 instanceof TermRangeQuery + || (rangeQuery2 instanceof IndexOrDocValuesQuery + && ((IndexOrDocValuesQuery) rangeQuery2).getIndexQuery() instanceof TermRangeQuery) + ); + + // Test null bounds + Query rangeQuery3 = fieldType.rangeQuery(null, "2.0.0", true, true, ShapeRelation.INTERSECTS, null, null, null); + assertTrue( + rangeQuery3 instanceof TermRangeQuery + || (rangeQuery3 instanceof IndexOrDocValuesQuery + && ((IndexOrDocValuesQuery) rangeQuery3).getIndexQuery() instanceof TermRangeQuery) + ); + + Query rangeQuery4 = fieldType.rangeQuery("1.0.0", null, true, true, ShapeRelation.INTERSECTS, null, null, null); + assertTrue( + rangeQuery4 instanceof TermRangeQuery + || (rangeQuery4 instanceof IndexOrDocValuesQuery + && ((IndexOrDocValuesQuery) rangeQuery4).getIndexQuery() instanceof TermRangeQuery) + ); + + // Test actual document matching + ParsedDocument doc1 = mapper.parse(source(b -> b.field("field", "1.5.0"))); + ParsedDocument doc2 = mapper.parse(source(b -> b.field("field", "2.5.0"))); + + // Should match doc1 but not doc2 + Query rangeQuery = fieldType.rangeQuery("1.0.0", "2.0.0", true, true, ShapeRelation.INTERSECTS, null, null, null); + + // Create readers and searcher + Directory dir1 = new ByteBuffersDirectory(); + Directory dir2 = new ByteBuffersDirectory(); + + // Index first document + IndexWriter writer1 = new IndexWriter(dir1, new IndexWriterConfig()); + writer1.addDocument(doc1.rootDoc()); + writer1.close(); + + // Index second document + IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig()); + writer2.addDocument(doc2.rootDoc()); + writer2.close(); + + // Create readers + IndexReader reader1 = DirectoryReader.open(dir1); + IndexReader reader2 = DirectoryReader.open(dir2); + + // Create MultiReader with array of readers + IndexReader[] readers = new IndexReader[] { reader1, reader2 }; + MultiReader multiReader = new MultiReader(readers, true); + + IndexSearcher searcher = new IndexSearcher(multiReader); + TopDocs hits = searcher.search(rangeQuery, 10); + + // Clean up + multiReader.close(); + dir1.close(); + dir2.close(); + } + + private IndexSearcher setupSearcher(DocumentMapper mapper) throws IOException { + List docs = Arrays.asList( + mapper.parse(source(b -> b.field("field", "1.0.0"))), + 
mapper.parse(source(b -> b.field("field", "1.0.1"))), + mapper.parse(source(b -> b.field("field", "1.1.0"))), + mapper.parse(source(b -> b.field("field", "2.0.0"))), + mapper.parse(source(b -> b.field("field", "1.0.0-alpha"))), + mapper.parse(source(b -> b.field("field", "1.0.0-beta"))), + mapper.parse(source(b -> b.field("field", "1.0.0+build.123"))) + ); + + Directory dir = new ByteBuffersDirectory(); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); + for (ParsedDocument doc : docs) { + writer.addDocument(doc.rootDoc()); + } + writer.close(); + + return new IndexSearcher(DirectoryReader.open(dir)); + } + + public void testPrefixQuery() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + IndexSearcher searcher = setupSearcher(mapper); + + Query prefixQuery = fieldType.prefixQuery( + "1.0", + MultiTermQuery.CONSTANT_SCORE_REWRITE, // Specify rewrite method + false, + null + ); + + assertThat("Should match 1.0.0, 1.0.1, 1.0.0-alpha, 1.0.0-beta, 1.0.0+build.123", searcher.count(prefixQuery), equalTo(5)); + + // Test different prefix patterns + Query majorVersionPrefix = fieldType.prefixQuery("1.", MultiTermQuery.CONSTANT_SCORE_REWRITE, false, null); + assertThat("Should match all version 1.x.x", searcher.count(majorVersionPrefix), equalTo(6)); + + // Test case sensitivity + Query caseInsensitivePrefix = fieldType.prefixQuery("1.0", MultiTermQuery.CONSTANT_SCORE_REWRITE, true, null); + assertThat("Should match same as case sensitive", searcher.count(caseInsensitivePrefix), equalTo(5)); + } + + public void testWildcardQuery() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + + // Create test documents with specific versions + List docs = Arrays.asList( + mapper.parse(source(b -> b.field("field", "1.0.0"))), + mapper.parse(source(b -> b.field("field", "1.1.0"))), + mapper.parse(source(b -> b.field("field", "2.1.0"))) + ); + + Directory dir = new ByteBuffersDirectory(); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); + for (ParsedDocument doc : docs) { + writer.addDocument(doc.rootDoc()); + } + writer.close(); + + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir)); + + Query wildcardQuery = fieldType.wildcardQuery("1.*.0", MultiTermQuery.CONSTANT_SCORE_REWRITE, false, null); + assertThat("Should match 1.0.0 and 1.1.0", searcher.count(wildcardQuery), equalTo(2)); + + dir.close(); + } + + public void testRegexpQuery() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + IndexSearcher searcher = setupSearcher(mapper); + + Query regexpQuery = fieldType.regexpQuery( + "1\\.0\\.0-.*", + RegExp.ALL, + 0, + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE, + null + ); + assertThat("Should match 1.0.0-alpha and 1.0.0-beta", searcher.count(regexpQuery), equalTo(2)); + } + + public void testFuzzyQuery() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + + // Create simple test documents + List docs = Arrays.asList( + 
mapper.parse(source(b -> b.field("field", "1.0.0"))), + mapper.parse(source(b -> b.field("field", "1.0.1"))) + ); + + Directory dir = new ByteBuffersDirectory(); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); + for (ParsedDocument doc : docs) { + writer.addDocument(doc.rootDoc()); + } + writer.close(); + + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir)); + + // Test fuzzy query + Query fuzzyQuery = fieldType.fuzzyQuery( + "1.0.0", + Fuzziness.ONE, + 0, // No prefix requirement + 50, + true, + MultiTermQuery.CONSTANT_SCORE_REWRITE, + null + ); + + assertThat( + "Fuzzy match should find similar versions", + searcher.count(fuzzyQuery), + equalTo(2) // Should match 1.0.0 and 1.0.1 + ); + + dir.close(); + } + + public void testComplexQuery() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MappedFieldType fieldType = ((FieldMapper) mapper.mappers().getMapper("field")).fieldType(); + IndexSearcher searcher = setupSearcher(mapper); + + Query complexQuery = new BooleanQuery.Builder().add( + fieldType.prefixQuery("1.", MultiTermQuery.CONSTANT_SCORE_REWRITE, false, null), + BooleanClause.Occur.MUST + ) + .add( + fieldType.regexpQuery( + ".*-.*", + RegExp.ALL, + 0, + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.CONSTANT_SCORE_REWRITE, + null + ), + BooleanClause.Occur.MUST_NOT + ) + .build(); + + assertThat("Should match 1.0.0, 1.0.1, 1.1.0, 1.0.0+build.123", searcher.count(complexQuery), equalTo(4)); + } + + /** + * Test to cover error cases in SemanticVersionFieldType + */ + public void testFieldTypeErrorCases() { + // Create a mock QueryShardContext + QueryShardContext mockContext = mock(QueryShardContext.class); + + // Create a field type with various configurations to test error paths + SemanticVersionFieldMapper.SemanticVersionFieldType fieldType = new SemanticVersionFieldMapper.SemanticVersionFieldType( + "test_field", + new HashMap<>(), + true, // isSearchable + true, // hasDocValues + false // isStored + ); + + // Test termQuery with null value (should throw exception) + IllegalArgumentException nullValueException = expectThrows( + IllegalArgumentException.class, + () -> fieldType.termQuery(null, mockContext) + ); + assertEquals("Cannot search for null value", nullValueException.getMessage()); + + // Create a field type that is not searchable but has doc values + SemanticVersionFieldMapper.SemanticVersionFieldType docValuesOnlyFieldType = + new SemanticVersionFieldMapper.SemanticVersionFieldType( + "docvalues_only", + new HashMap<>(), + false, // isSearchable + true, // hasDocValues + false // isStored + ); + + // Test termQuery - should use doc values query only + Query docValuesQuery = docValuesOnlyFieldType.termQuery("1.0.0", mockContext); + assertNotNull(docValuesQuery); + + // Create a field type that is searchable but has no doc values + SemanticVersionFieldMapper.SemanticVersionFieldType searchOnlyFieldType = new SemanticVersionFieldMapper.SemanticVersionFieldType( + "search_only", + new HashMap<>(), + true, // isSearchable + false, // hasDocValues + false // isStored + ); + + // Test termQuery - should use index query only + Query indexQuery = searchOnlyFieldType.termQuery("1.0.0", mockContext); + assertNotNull(indexQuery); + + // Create a field type that is neither searchable nor has doc values + SemanticVersionFieldMapper.SemanticVersionFieldType invalidFieldType = new SemanticVersionFieldMapper.SemanticVersionFieldType( + "invalid_field", + new HashMap<>(), + 
false, // isSearchable + false, // hasDocValues + false // isStored + ); + + // Test termQuery - should throw exception + IllegalArgumentException invalidFieldException = expectThrows( + IllegalArgumentException.class, + () -> invalidFieldType.termQuery("1.0.0", mockContext) + ); + assertThat(invalidFieldException.getMessage(), containsString("is neither indexed nor has doc_values enabled")); + + // Test rangeQuery with invalid version format - should throw QueryShardException + QueryShardException rangeException = expectThrows( + QueryShardException.class, + () -> fieldType.rangeQuery("invalid-version", "2.0.0", true, true, null, null, null, mockContext) + ); + assertThat(rangeException.getMessage(), containsString("Failed to create range query for field")); + + // Test termsQuery with different field configurations + List terms = Arrays.asList("1.0.0", "2.0.0", "3.0.0"); + + // Test with searchable field + Query termsQuery = searchOnlyFieldType.termsQuery(terms, mockContext); + assertNotNull(termsQuery); + + // Test with doc values only field + Query docValuesTermsQuery = docValuesOnlyFieldType.termsQuery(terms, mockContext); + assertNotNull(docValuesTermsQuery); + + // Test with invalid field + IllegalArgumentException termsException = expectThrows( + IllegalArgumentException.class, + () -> invalidFieldType.termsQuery(terms, mockContext) + ); + assertThat(termsException.getMessage(), containsString("is neither indexed nor has doc_values enabled")); + + // Test regexpQuery with non-searchable field + IllegalArgumentException regexpException = expectThrows( + IllegalArgumentException.class, + () -> docValuesOnlyFieldType.regexpQuery("1\\.0\\..*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_REWRITE, mockContext) + ); + assertEquals("Regexp queries require the field to be indexed", regexpException.getMessage()); + + // Test wildcardQuery with case insensitivity + Query wildcardQuery = fieldType.wildcardQuery( + "1.0.*", + MultiTermQuery.CONSTANT_SCORE_REWRITE, + true, // case insensitive + mockContext + ); + assertNotNull(wildcardQuery); + + // Test wildcardQuery with non-searchable field + IllegalArgumentException wildcardException = expectThrows( + IllegalArgumentException.class, + () -> docValuesOnlyFieldType.wildcardQuery("1.0.*", MultiTermQuery.CONSTANT_SCORE_REWRITE, false, mockContext) + ); + assertEquals("Wildcard queries require the field to be indexed", wildcardException.getMessage()); + + // Test prefixQuery with non-searchable field + IllegalArgumentException prefixException = expectThrows( + IllegalArgumentException.class, + () -> docValuesOnlyFieldType.prefixQuery("1.0", MultiTermQuery.CONSTANT_SCORE_REWRITE, false, mockContext) + ); + assertEquals("Prefix queries require the field to be indexed", prefixException.getMessage()); + + // Test fuzzyQuery with null rewrite method (should use default) + Query fuzzyQuery = fieldType.fuzzyQuery( + "1.0.0", + Fuzziness.ONE, + 0, + 50, + true, + null, // null rewrite method + mockContext + ); + assertNotNull(fuzzyQuery); + + // Test fuzzyQuery with non-searchable field + IllegalArgumentException fuzzyException = expectThrows( + IllegalArgumentException.class, + () -> docValuesOnlyFieldType.fuzzyQuery("1.0.0", Fuzziness.ONE, 0, 50, true, MultiTermQuery.CONSTANT_SCORE_REWRITE, mockContext) + ); + assertEquals("Fuzzy queries require the field to be indexed", fuzzyException.getMessage()); + + // Test valueFetcher with format parameter + SearchLookup mockLookup = mock(SearchLookup.class); + IllegalArgumentException formatException = 
expectThrows( + IllegalArgumentException.class, + () -> fieldType.valueFetcher(mockContext, mockLookup, "some_format") + ); + assertThat(formatException.getMessage(), containsString("doesn't support formats")); + + // Test valueFetcher without format parameter + assertNotNull(fieldType.valueFetcher(mockContext, mockLookup, null)); + + // Test fielddataBuilder with doc_values disabled + IllegalArgumentException fieldDataException = expectThrows( + IllegalArgumentException.class, + () -> searchOnlyFieldType.fielddataBuilder("test_index", null) + ); + assertThat(fieldDataException.getMessage(), containsString("does not have doc_values enabled")); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/SemanticVersionTests.java b/server/src/test/java/org/opensearch/index/mapper/SemanticVersionTests.java new file mode 100644 index 0000000000000..f364ea6ff9dde --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/SemanticVersionTests.java @@ -0,0 +1,352 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.mapper; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +/** + * Test class for SemanticVersion + */ +public class SemanticVersionTests extends OpenSearchTestCase { + + public void testBasicVersionParsing() { + SemanticVersion version = SemanticVersion.parse("1.2.3"); + assertEquals("1.2.3", version.toString()); + } + + public void testPreReleaseVersionParsing() { + SemanticVersion version = SemanticVersion.parse("1.2.3-alpha"); + assertEquals("1.2.3-alpha", version.toString()); + + version = SemanticVersion.parse("1.2.3-alpha.1"); + assertEquals("1.2.3-alpha.1", version.toString()); + + version = SemanticVersion.parse("1.2.3-0.3.7"); + assertEquals("1.2.3-0.3.7", version.toString()); + + version = SemanticVersion.parse("1.2.3-x.7.z.92"); + assertEquals("1.2.3-x.7.z.92", version.toString()); + } + + public void testBuildMetadataParsing() { + SemanticVersion version = SemanticVersion.parse("1.2.3+build.123"); + assertEquals("1.2.3+build.123", version.toString()); + + version = SemanticVersion.parse("1.2.3+build.123.xyz"); + assertEquals("1.2.3+build.123.xyz", version.toString()); + } + + public void testCompleteVersionParsing() { + SemanticVersion version = SemanticVersion.parse("1.2.3-alpha.1+build.123"); + assertEquals("1.2.3-alpha.1+build.123", version.toString()); + } + + public void testInvalidVersions() { + // Test null + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> SemanticVersion.parse(null)); + assertEquals("Version string cannot be null or empty", e.getMessage()); + + // Test empty string + e = expectThrows(IllegalArgumentException.class, () -> SemanticVersion.parse("")); + assertEquals("Version string cannot be null or empty", e.getMessage()); + + // Test invalid formats + List invalidVersions = Arrays.asList( + "1", + "1.2", + "1.2.3.4", + "1.2.3-", + "1.2.3+", + "01.2.3", + "1.02.3", + "1.2.03", + "1.2.3-@invalid", + "1.2.3+@invalid", + "a.b.c", + "-1.2.3", + "1.-2.3", + "1.2.-3" + ); + + for (String invalid : invalidVersions) { + e = expectThrows(IllegalArgumentException.class, () -> SemanticVersion.parse(invalid)); + assertTrue(e.getMessage().contains("Invalid semantic version 
format")); + } + } + + public void testNegativeNumbers() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SemanticVersion(-1, 0, 0, null, null)); + assertEquals("Version numbers cannot be negative", e.getMessage()); + } + + public void testVersionComparison() { + // Test major version comparison + assertThat(SemanticVersion.parse("2.0.0").compareTo(SemanticVersion.parse("1.0.0")), greaterThan(0)); + assertThat(SemanticVersion.parse("1.0.0").compareTo(SemanticVersion.parse("2.0.0")), lessThan(0)); + + // Test minor version comparison + assertThat(SemanticVersion.parse("1.2.0").compareTo(SemanticVersion.parse("1.1.0")), greaterThan(0)); + + // Test patch version comparison + assertThat(SemanticVersion.parse("1.1.2").compareTo(SemanticVersion.parse("1.1.1")), greaterThan(0)); + } + + public void testPreReleaseComparison() { + // Pre-release versions have lower precedence + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0")), lessThan(0)); + + // Numeric identifiers + assertThat(SemanticVersion.parse("1.0.0-alpha.1").compareTo(SemanticVersion.parse("1.0.0-alpha.2")), lessThan(0)); + + // Alphanumeric identifiers + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0-beta")), lessThan(0)); + } + + public void testComplexPreReleaseComparison() { + List orderedVersions = Arrays.asList( + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-alpha.beta", + "1.0.0-beta", + "1.0.0-beta.2", + "1.0.0-beta.11", + "1.0.0-rc.1", + "1.0.0" + ); + + for (int i = 0; i < orderedVersions.size() - 1; i++) { + SemanticVersion v1 = SemanticVersion.parse(orderedVersions.get(i)); + SemanticVersion v2 = SemanticVersion.parse(orderedVersions.get(i + 1)); + assertThat(v1.compareTo(v2), lessThan(0)); + } + } + + public void testBuildMetadataComparison() { + // Build metadata should be ignored in precedence + assertEquals(0, SemanticVersion.parse("1.0.0+build.1").compareTo(SemanticVersion.parse("1.0.0+build.2"))); + assertEquals(0, SemanticVersion.parse("1.0.0").compareTo(SemanticVersion.parse("1.0.0+build"))); + } + + public void testNormalizedString() { + SemanticVersion version = SemanticVersion.parse("1.2.3-alpha+build.123"); + String normalized = version.getNormalizedString(); + + // Check padding + assertTrue(normalized.startsWith("00000000000000000001")); + assertTrue(normalized.contains("00000000000000000002")); + assertTrue(normalized.contains("00000000000000000003")); + + // Check pre-release and build metadata + assertTrue(normalized.contains("-alpha")); + assertTrue(normalized.contains("+build.123")); + } + + public void testEdgeCases() { + // Very large version numbers + SemanticVersion version = SemanticVersion.parse("999999999.999999999.999999999"); + assertEquals("999999999.999999999.999999999", version.toString()); + + // Long pre-release string + version = SemanticVersion.parse("1.0.0-alpha.beta.gamma.delta.epsilon"); + assertEquals("1.0.0-alpha.beta.gamma.delta.epsilon", version.toString()); + + // Long build metadata + version = SemanticVersion.parse("1.0.0+build.123.456.789.abc.def"); + assertEquals("1.0.0+build.123.456.789.abc.def", version.toString()); + } + + public void testNullComparison() { + SemanticVersion version = SemanticVersion.parse("1.0.0"); + assertThat(version.compareTo(null), greaterThan(0)); + } + + public void testPreReleaseIdentifierComparison() { + // Numeric identifiers have lower precedence than non-numeric + 
assertThat(SemanticVersion.parse("1.0.0-1").compareTo(SemanticVersion.parse("1.0.0-alpha")), lessThan(0)); + + // Longer pre-release version has higher precedence + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0-alpha.1")), lessThan(0)); + } + + public void testGetNormalizedComparableString() { + // Stable release - should end with '~' + SemanticVersion stable = SemanticVersion.parse("1.0.0"); + String stableNorm = stable.getNormalizedComparableString(); + assertTrue(stableNorm.startsWith("00000000000000000001.00000000000000000000.00000000000000000000")); + assertTrue(stableNorm.endsWith("~")); + + // Pre-release alpha - should end with '-alpha' (lowercase) + SemanticVersion alpha = SemanticVersion.parse("1.0.0-alpha"); + String alphaNorm = alpha.getNormalizedComparableString(); + assertTrue(alphaNorm.startsWith("00000000000000000001.00000000000000000000.00000000000000000000")); + assertTrue(alphaNorm.endsWith("-alpha")); + + // Pre-release beta - should end with '-beta' + SemanticVersion beta = SemanticVersion.parse("1.0.0-beta"); + String betaNorm = beta.getNormalizedComparableString(); + assertTrue(betaNorm.startsWith("00000000000000000001.00000000000000000000.00000000000000000000")); + assertTrue(betaNorm.endsWith("-beta")); + + // Pre-release with uppercase (should be lowercased in normalized) + SemanticVersion preReleaseCaps = SemanticVersion.parse("1.0.0-ALPHA.BETA"); + String preReleaseCapsNorm = preReleaseCaps.getNormalizedComparableString(); + assertTrue(preReleaseCapsNorm.endsWith("-alpha.beta")); + + // Stable release with build metadata (build metadata ignored in normalized string) + SemanticVersion stableWithBuild = SemanticVersion.parse("1.0.0+build.123"); + String stableWithBuildNorm = stableWithBuild.getNormalizedComparableString(); + assertEquals(stableNorm, stableWithBuildNorm); + + // Pre-release with build metadata (build metadata ignored) + SemanticVersion preReleaseWithBuild = SemanticVersion.parse("1.0.0-beta+build.456"); + String preReleaseWithBuildNorm = preReleaseWithBuild.getNormalizedComparableString(); + assertEquals(betaNorm, preReleaseWithBuildNorm); + } + + public void testSquareBracketRemoval() { + // Test that square brackets are removed during parsing + SemanticVersion version1 = SemanticVersion.parse("[1.2.3]"); + assertEquals("1.2.3", version1.toString()); + + SemanticVersion version2 = SemanticVersion.parse("[1.2.3-alpha]"); + assertEquals("1.2.3-alpha", version2.toString()); + + SemanticVersion version3 = SemanticVersion.parse("[1.2.3+build.123]"); + assertEquals("1.2.3+build.123", version3.toString()); + + SemanticVersion version4 = SemanticVersion.parse("[1.2.3-alpha+build.123]"); + assertEquals("1.2.3-alpha+build.123", version4.toString()); + } + + public void testWhitespaceHandling() { + // Test that whitespace is converted to dots during parsing + SemanticVersion version1 = SemanticVersion.parse("1 2 3"); + assertEquals("1.2.3", version1.toString()); + + SemanticVersion version2 = SemanticVersion.parse("1 2 3"); + assertEquals("1.2.3", version2.toString()); + + SemanticVersion version3 = SemanticVersion.parse("1\t2\t3"); + assertEquals("1.2.3", version3.toString()); + + SemanticVersion version4 = SemanticVersion.parse("1 2 3"); + assertEquals("1.2.3", version4.toString()); + + // Test mixed whitespace + SemanticVersion version5 = SemanticVersion.parse("1 \t 2 \t 3"); + assertEquals("1.2.3", version5.toString()); + } + + public void testNumberFormatExceptionHandling() { + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, () -> SemanticVersion.parse("01.02.03")); + assertTrue(e.getMessage().contains("Invalid semantic version format")); + } + + public void testComparePreReleaseMethod() { + // Test numeric vs non-numeric identifiers + assertThat(SemanticVersion.parse("1.0.0-1").compareTo(SemanticVersion.parse("1.0.0-alpha")), lessThan(0)); + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0-1")), greaterThan(0)); + + // Test numeric comparison + assertThat(SemanticVersion.parse("1.0.0-1").compareTo(SemanticVersion.parse("1.0.0-2")), lessThan(0)); + assertThat(SemanticVersion.parse("1.0.0-10").compareTo(SemanticVersion.parse("1.0.0-2")), greaterThan(0)); + + // Test string comparison + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0-beta")), lessThan(0)); + assertThat(SemanticVersion.parse("1.0.0-beta").compareTo(SemanticVersion.parse("1.0.0-alpha")), greaterThan(0)); + + // Test mixed numeric and string + assertThat(SemanticVersion.parse("1.0.0-alpha.1").compareTo(SemanticVersion.parse("1.0.0-alpha.2")), lessThan(0)); + assertThat(SemanticVersion.parse("1.0.0-alpha.2").compareTo(SemanticVersion.parse("1.0.0-alpha.10")), lessThan(0)); + + // Test different lengths + assertThat(SemanticVersion.parse("1.0.0-alpha").compareTo(SemanticVersion.parse("1.0.0-alpha.1")), lessThan(0)); + assertThat(SemanticVersion.parse("1.0.0-alpha.1").compareTo(SemanticVersion.parse("1.0.0-alpha")), greaterThan(0)); + } + + public void testPadWithZerosMethod() { + // Test with small numbers that need padding + SemanticVersion version1 = SemanticVersion.parse("1.2.3"); + String normalized1 = version1.getNormalizedString(); + assertEquals("00000000000000000001.00000000000000000002.00000000000000000003", normalized1); + + // Test with larger numbers that need less padding + SemanticVersion version2 = SemanticVersion.parse("123.456.789"); + String normalized2 = version2.getNormalizedString(); + assertEquals("00000000000000000123.00000000000000000456.00000000000000000789", normalized2); + + // Test with very large numbers + SemanticVersion version3 = SemanticVersion.parse("999999999.999999999.999999999"); + String normalized3 = version3.getNormalizedString(); + assertEquals("00000000000999999999.00000000000999999999.00000000000999999999", normalized3); + + // Test with zero values + SemanticVersion version4 = SemanticVersion.parse("0.0.0"); + String normalized4 = version4.getNormalizedString(); + assertEquals("00000000000000000000.00000000000000000000.00000000000000000000", normalized4); + + // Test with a value that doesn't need padding + String str = "9999999999"; + String padded = SemanticVersion.parse("1.0.0").toString(); + } + + /** + * Test to cover getBuild() method + */ + public void testGetBuild() { + // Test with build metadata + SemanticVersion version = SemanticVersion.parse("1.2.3+build.123"); + assertEquals("build.123", version.getBuild()); + + // Test without build metadata + SemanticVersion versionNoBuild = SemanticVersion.parse("1.2.3"); + assertNull(versionNoBuild.getBuild()); + } + + /** + * Test to cover NumberFormatException handling in parse method + */ + public void testNumberFormatExceptionInParse() { + try { + // This should throw IllegalArgumentException with a NumberFormatException cause + SemanticVersion.parse("2147483648.0.0"); // Integer.MAX_VALUE + 1 + fail("Should have thrown an exception"); + } catch (IllegalArgumentException e) { + // Verify that the exception message contains the 
expected text + assertTrue(e.getMessage().contains("Invalid version numbers")); + // Verify that the cause is a NumberFormatException + assertTrue(e.getCause() instanceof NumberFormatException); + } + } + + /** + * Test to cover the else branch in comparePreRelease method + */ + public void testComparePreReleaseElseBranch() { + // Create versions with non-numeric pre-release identifiers + SemanticVersion v1 = SemanticVersion.parse("1.0.0-alpha"); + SemanticVersion v2 = SemanticVersion.parse("1.0.0-beta"); + + // alpha comes before beta lexically + assertThat(v1.compareTo(v2), lessThan(0)); + assertThat(v2.compareTo(v1), greaterThan(0)); + + // Test with mixed case to ensure case sensitivity is handled + SemanticVersion v3 = SemanticVersion.parse("1.0.0-Alpha"); + SemanticVersion v4 = SemanticVersion.parse("1.0.0-alpha"); + + // Uppercase comes before lowercase in ASCII + assertThat(v3.compareTo(v4), lessThan(0)); + } +} diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index f62bcbcf1038c..267567f0c3c86 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -786,6 +786,16 @@ public void testToQueryRegExpQuery() throws Exception { assertTrue(regexpQuery.toString().contains("/foo*bar/")); } + public void testRegexpQueryParserWithForceAnalyzer() throws Exception { + QueryStringQueryParser queryParser = new QueryStringQueryParser(createShardContext(), TEXT_FIELD_NAME); + queryParser.setForceAnalyzer(new org.apache.lucene.analysis.standard.StandardAnalyzer()); + Query query = queryParser.parse("/aBc.*/"); + assertThat(query, instanceOf(RegexpQuery.class)); + RegexpQuery regexpQuery = (RegexpQuery) query; + // Standard analyzer normalizes to lowercase, verifying the normalization path with currentFieldType.name() is hit + assertTrue(regexpQuery.toString().contains("abc.*")); + } + public void testToQueryRegExpQueryTooComplex() throws Exception { QueryStringQueryBuilder queryBuilder = queryStringQuery("/[ac]*a[ac]{50,200}/").defaultField(TEXT_FIELD_NAME); diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index c67e9f8b3180c..7994852217033 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -257,8 +257,12 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, query ); } else if (expectedFieldName.equals(INT_FIELD_NAME)) { - assertThat(query, instanceOf(IndexOrDocValuesQuery.class)); - query = ((IndexOrDocValuesQuery) query).getIndexQuery(); + assertThat(query, instanceOf(ApproximateScoreQuery.class)); + Query approximationQuery = ((ApproximateScoreQuery) query).getApproximationQuery(); + assertThat(approximationQuery, instanceOf(ApproximateQuery.class)); + Query originalQuery = ((ApproximateScoreQuery) query).getOriginalQuery(); + assertThat(originalQuery, instanceOf(IndexOrDocValuesQuery.class)); + query = ((IndexOrDocValuesQuery) originalQuery).getIndexQuery(); assertThat(query, instanceOf(PointRangeQuery.class)); Integer min = (Integer) queryBuilder.from(); Integer max = (Integer) queryBuilder.to(); @@ -299,6 +303,9 @@ public void testIllegalArguments() { public void 
testToQueryNumericField() throws IOException { Query parsedQuery = rangeQuery(INT_FIELD_NAME).from(23).to(54).includeLower(true).includeUpper(false).toQuery(createShardContext()); + if (parsedQuery instanceof ApproximateScoreQuery) { + parsedQuery = ((ApproximateScoreQuery) parsedQuery).getOriginalQuery(); + } // since age is automatically registered in data, we encode it as numeric assertThat(parsedQuery, instanceOf(IndexOrDocValuesQuery.class)); parsedQuery = ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery(); diff --git a/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java index 30c8c07fa2c27..7f840cb789b46 100644 --- a/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java @@ -55,7 +55,15 @@ protected RegexpQueryBuilder doCreateTestQueryBuilder() { List flags = new ArrayList<>(); int iter = randomInt(5); for (int i = 0; i < iter; i++) { - flags.add(randomFrom(RegexpFlag.values())); + // Exclude COMPLEMENT from random selection to avoid deprecation warnings + RegexpFlag[] availableFlags = { + RegexpFlag.INTERSECTION, + RegexpFlag.EMPTY, + RegexpFlag.ANYSTRING, + RegexpFlag.INTERVAL, + RegexpFlag.NONE, + RegexpFlag.ALL }; + flags.add(randomFrom(availableFlags)); } query.flags(flags.toArray(new RegexpFlag[0])); } @@ -162,4 +170,32 @@ public void testParseFailsWithMultipleFields() throws IOException { e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[regexp] query doesn't support multiple fields, found [user1] and [user2]", e.getMessage()); } + + // Test that COMPLEMENT flag triggers deprecation warning + public void testComplementFlagDeprecation() throws IOException { + RegexpQueryBuilder query = new RegexpQueryBuilder("field", "a~bc"); + query.flags(RegexpFlag.COMPLEMENT); + QueryShardContext context = createShardContext(); + Query luceneQuery = query.toQuery(context); + assertNotNull(luceneQuery); + assertThat(luceneQuery, instanceOf(RegexpQuery.class)); + assertWarnings( + "The complement operator (~) for arbitrary patterns in regexp queries is deprecated " + + "and will be removed in a future version. Consider rewriting your query to use character class negation [^...] or other query types." + ); + } + + // Separate test for COMPLEMENT flag Cacheability + public void testComplementFlagCacheability() throws IOException { + RegexpQueryBuilder queryBuilder = new RegexpQueryBuilder("field", "pattern"); + queryBuilder.flags(RegexpFlag.COMPLEMENT); + QueryShardContext context = createShardContext(); + QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new QueryShardContext(context)); + assertNotNull(rewriteQuery.toQuery(context)); + assertTrue("query should be cacheable: " + queryBuilder, context.isCacheable()); + assertWarnings( + "The complement operator (~) for arbitrary patterns in regexp queries is deprecated " + + "and will be removed in a future version. Consider rewriting your query to use character class negation [^...] or other query types." 
+ ); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SemanticVersionFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/SemanticVersionFieldQueryTests.java new file mode 100644 index 0000000000000..048ea37aae73c --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/SemanticVersionFieldQueryTests.java @@ -0,0 +1,419 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.query; + +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +public class SemanticVersionFieldQueryTests extends OpenSearchSingleNodeTestCase { + + @Override + public void setUp() throws Exception { + super.setUp(); + setupIndex(); + ensureGreen("test"); + } + + protected XContentBuilder createMapping() throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("version") + .field("type", "version") + .endObject() + .endObject() + .endObject(); + } + + protected void setupIndex() throws IOException { + XContentBuilder mapping = createMapping(); + client().admin().indices().prepareCreate("test").setMapping(mapping).get(); + ensureGreen(); + + String doc1 = """ + { + "version": "1.0.0" + }"""; + client().index(new IndexRequest("test").id("1").source(doc1, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc2 = """ + { + "version": "1.0.1" + }"""; + client().index(new IndexRequest("test").id("2").source(doc2, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc3 = """ + { + "version": "1.1.0" + }"""; + client().index(new IndexRequest("test").id("3").source(doc3, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc4 = """ + { + "version": "2.0.0" + }"""; + client().index(new IndexRequest("test").id("4").source(doc4, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc5 = """ + { + "version": "1.0.0-alpha" + }"""; + client().index(new IndexRequest("test").id("5").source(doc5, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc6 = """ + { + "version": "1.0.0-beta" + }"""; + client().index(new IndexRequest("test").id("6").source(doc6, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + + String doc7 = """ + { + "version": "1.0.0+build.123" + }"""; + client().index(new IndexRequest("test").id("7").source(doc7, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + } + + /** + * Expected behavior: + * 1.0.0-alpha (Match) + * 1.0.0-beta (Match) + * 1.0.0+build.123 (Match) + * 1.0.0 (Match) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Match) + * @throws IOException + */ + public void testExistsQuery() throws IOException { + // 
Test exists query + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("version")).get(); + + assertSearchResponse(response); + assertHitCount(response, 7); // Should match all documents with a version field + + // Test with non-existent field + response = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("non_existent_field")).get(); + + assertSearchResponse(response); + assertHitCount(response, 0); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Ignore) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testMatchQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("version", "1.0.0")).get(); + + assertSearchResponse(response); + assertHitCount(response, 1); // Should match "1.0.0" if analyzed, or 0 if not. + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Ignore) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testMultiMatchQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.multiMatchQuery("1.0.0", "version", "non_existent_field")) + .get(); + + assertSearchResponse(response); + assertHitCount(response, 1); // Match only "1.0.0" in "version" + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException if the search request fails + */ + public void testTermQuery() throws IOException { + GetResponse getResponse = client().prepareGet("test", "1").get(); + assertTrue("Document should exist", getResponse.isExists()); + assertEquals("1.0.0", getResponse.getSourceAsMap().get("version")); + + // Term query + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("version", "1.0.0")).get(); + + assertSearchResponse(response); + assertHitCount(response, 1); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Match) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testTermsQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.termsQuery("version", "1.0.0", "1.0.1")).get(); + + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Ignore) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testRangeQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("version").gt("1.0.0").lt("2.0.0")) + .get(); + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha (Match) + * 1.0.0-beta (Match) + * 1.0.0+build.123 (Match) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testRangeQueryIncludingPreRelease() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("version").gte("1.0.0-alpha").lt("1.0.1")) + .get(); + + assertSearchResponse(response); + assertHitCount(response, 4); + } + + /** + * 1.0.0-alpha (Match) + * 1.0.0-beta 
(Match) + * 1.0.0+build.123 (Match) + * 1.0.0 (Match) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testPrefixQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.prefixQuery("version", "1.")).get(); + assertSearchResponse(response); + assertHitCount(response, 6); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Ignore) + * 2.0.0 (Match) + * @throws IOException + */ + public void testWildcardQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.wildcardQuery("version", "*.0.0")).get(); + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testRegexpQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.regexpQuery("version", "1\\..*\\.0")).get(); + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Match) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testFuzzyQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.fuzzyQuery("version", "1.0.1").fuzziness(Fuzziness.ONE)) + .get(); + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha — excluded because of dash (mustNot) + * 1.0.0-beta — excluded (mustNot) + * 1.0.0+build.123 — no dash, starts with 1. → included + * 1.0.0 — included + * 1.0.1 — included + * 1.1.0 — included (starts with 1. and no dash) + * 2.0.0 — excluded (does not start with 1.) 
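
The range and sort expectations in this test class (for example testRangeQueryIncludingPreRelease above and testSortByVersion below) follow SemVer 2.0.0 precedence: a pre-release sorts before its corresponding release, pre-release identifiers are compared left to right with numeric identifiers compared numerically and ranking below alphanumeric ones, and build metadata is ignored. The following is a minimal sketch of that comparison for well-formed input; the class and helper names are illustrative only and this is not the field mapper's actual implementation.

    import java.util.Comparator;

    // Illustrative sketch of SemVer 2.0.0 precedence; not the mapper's production code.
    final class SemverPrecedence {

        static final Comparator<String> ORDER = SemverPrecedence::compare;

        static int compare(String a, String b) {
            String[] va = coreAndPreRelease(a);
            String[] vb = coreAndPreRelease(b);
            String[] ca = va[0].split("\\.");
            String[] cb = vb[0].split("\\.");
            for (int i = 0; i < 3; i++) {                        // major, minor, patch compared numerically
                int c = Long.compare(Long.parseLong(ca[i]), Long.parseLong(cb[i]));
                if (c != 0) return c;
            }
            if (va[1].isEmpty() || vb[1].isEmpty()) {            // a release outranks any of its pre-releases
                return Boolean.compare(va[1].isEmpty(), vb[1].isEmpty());
            }
            String[] pa = va[1].split("\\.");
            String[] pb = vb[1].split("\\.");
            for (int i = 0; i < Math.min(pa.length, pb.length); i++) {
                boolean na = pa[i].chars().allMatch(Character::isDigit);
                boolean nb = pb[i].chars().allMatch(Character::isDigit);
                int c = (na && nb) ? Long.compare(Long.parseLong(pa[i]), Long.parseLong(pb[i]))
                    : (na != nb) ? (na ? -1 : 1)                 // numeric identifiers rank lower than alphanumeric
                    : pa[i].compareTo(pb[i]);
                if (c != 0) return c;
            }
            return Integer.compare(pa.length, pb.length);        // "alpha" < "alpha.1"
        }

        private static String[] coreAndPreRelease(String v) {
            String noBuild = v.split("\\+", 2)[0];               // build metadata never affects precedence
            int dash = noBuild.indexOf('-');
            return dash < 0
                ? new String[] { noBuild, "" }
                : new String[] { noBuild.substring(0, dash), noBuild.substring(dash + 1) };
        }
    }

Sorting the seven indexed versions with this comparator reproduces the ascending order asserted later in testSortByVersion, up to the tie between 1.0.0 and 1.0.0+build.123.
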
+ * @throws IOException + */ + public void testComplexQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.prefixQuery("version", "1.")) + .mustNot(QueryBuilders.regexpQuery("version", ".*-.*")) + .should(QueryBuilders.termQuery("version", "1.0.0")) + ) + .get(); + assertSearchResponse(response); + assertHitCount(response, 4); + } + + /** + * Should match documents with IDs 1 and 2 regardless of version: + * Document 1 - version 1.0.0 + * Document 2 - version 1.0.1 + * @throws IOException + */ + public void testIdsQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.idsQuery().addIds("1", "2")).get(); + + assertSearchResponse(response); + assertHitCount(response, 2); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testConstantScoreQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery("version", "1.0.0"))) + .get(); + + assertSearchResponse(response); + assertHitCount(response, 1); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Match) + * 1.0.1 (Ignore) + * 1.1.0 (Ignore) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testFunctionScoreQuery() throws IOException { + SearchResponse response = client().prepareSearch("test") + .setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("version", "1.0.0"))) + .get(); + + assertSearchResponse(response); + assertHitCount(response, 1); + } + + /** + * 1.0.0-alpha (Ignore) + * 1.0.0-beta (Ignore) + * 1.0.0+build.123 (Ignore) + * 1.0.0 (Ignore) + * 1.0.1 (Match) + * 1.1.0 (Match) + * 2.0.0 (Ignore) + * @throws IOException + */ + public void testMatchPhraseQuery() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("version", "1.0.0")).get(); + + assertSearchResponse(response); + assertHitCount(response, 1); + } + + /** + * Sorted ascending: + * First hit: 1.0.0-alpha + * Second hit: 1.0.0-beta + * Third hit: 1.0.0 + * Fourth hit: 1.0.0+build.123 + * Fifth hit: 1.0.1 + * Sixth hit: 1.1.0 + * Seventh hit: 2.0.0 + * @throws IOException + */ + public void testSortByVersion() throws IOException { + SearchResponse response = client().prepareSearch("test") + .addSort("version", org.opensearch.search.sort.SortOrder.ASC) + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + + assertSearchResponse(response); + assertHitCount(response, 7); + assertEquals("1.0.0-alpha", response.getHits().getAt(0).getSourceAsMap().get("version")); + } + + /** + * Test that a term query for an invalid version returns no hits. 
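
testTermQueryInvalidVersion below expects zero hits for "invalid-version" because no well-formed document could carry that value, and the invalid-format tests earlier in this changeset (SemanticVersionFieldMapperTests and SemanticVersionTests) reject the same kinds of input: leading zeros, empty identifiers, and stray characters. A simplified shape check along those lines is sketched here; the pattern is deliberately abbreviated and is not the parser's actual grammar.

    import java.util.regex.Pattern;

    // Simplified SemVer shape check; the official semver.org grammar is stricter about identifiers.
    final class SemverCheck {
        private static final Pattern SEMVER = Pattern.compile(
            "(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)"      // core version, no leading zeros
                + "(?:-[0-9A-Za-z-]+(?:\\.[0-9A-Za-z-]+)*)?"     // optional pre-release identifiers
                + "(?:\\+[0-9A-Za-z-]+(?:\\.[0-9A-Za-z-]+)*)?"   // optional build metadata
        );

        static boolean isValid(String version) {
            return version != null && SEMVER.matcher(version).matches();
        }
    }

Under this check, "1.0.0-alpha.1" and "1.0.0+build.123" pass, while "invalid-version", "1.2", and "01.2.3" fail, matching the accepted and rejected values exercised by the tests.
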
+ * @throws IOException + */ + public void testTermQueryInvalidVersion() throws IOException { + SearchResponse response = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("version", "invalid-version")).get(); + + assertSearchResponse(response); + assertHitCount(response, 0); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreUploaderServiceTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreUploaderServiceTests.java new file mode 100644 index 0000000000000..d522020d0198b --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreUploaderServiceTests.java @@ -0,0 +1,477 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.util.UploadListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.CompositeDirectory; +import org.opensearch.index.store.RemoteDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Test class for {@link RemoteStoreUploaderService}. + * Tests various scenarios of segment upload functionality including success cases, + * error handling, and different directory configurations. + */ +public class RemoteStoreUploaderServiceTests extends OpenSearchTestCase { + + /** Mock IndexShard instance used across tests */ + private IndexShard mockIndexShard; + + /** Mock Directory representing the local store directory */ + private Directory mockStoreDirectory; + + /** Mock RemoteSegmentStoreDirectory for remote storage operations */ + private RemoteSegmentStoreDirectory mockRemoteDirectory; + + /** The service under test */ + private RemoteStoreUploaderService uploaderService; + + /** Mock upload listener for tracking upload events */ + private UploadListener mockUploadListener; + + /** Mock function that creates upload listeners */ + private Function, UploadListener> mockUploadListenerFunction; + + /** + * Sets up the test environment before each test method. + * Initializes all mock objects and the service under test. 
+ * + * @throws Exception if setup fails + */ + @Override + public void setUp() throws Exception { + super.setUp(); + mockIndexShard = mock(IndexShard.class); + ShardId shardId = new ShardId(new Index("test", "test"), 0); + when(mockIndexShard.shardId()).thenReturn(shardId); + + // Mock IndexShard methods instead of setting private fields + when(mockIndexShard.state()).thenReturn(IndexShardState.STARTED); + mockStoreDirectory = mock(FilterDirectory.class); + // Use a real instance with mocked dependencies instead of mocking the final class + RemoteDirectory remoteDataDirectory = mock(RemoteDirectory.class); + mockRemoteDirectory = createMockRemoteDirectory(remoteDataDirectory); + mockUploadListener = mock(UploadListener.class); + mockUploadListenerFunction = mock(Function.class); + + when(mockUploadListenerFunction.apply(any())).thenReturn(mockUploadListener); + + uploaderService = new RemoteStoreUploaderService(mockIndexShard, mockStoreDirectory, mockRemoteDirectory); + } + + /** + * Creates a real RemoteSegmentStoreDirectory instance with mocked dependencies + * instead of trying to mock the final class directly. + * This approach is used because RemoteSegmentStoreDirectory is a final class + * that cannot be mocked directly. + * + * @param remoteDirectory the remote directory to use (currently unused) + * @return a new RemoteSegmentStoreDirectory instance with mocked dependencies + * @throws RuntimeException if the directory creation fails + */ + private RemoteSegmentStoreDirectory createMockRemoteDirectory(RemoteDirectory remoteDirectory) { + try { + RemoteDirectory remoteDataDirectory = mock(RemoteDirectory.class); + RemoteDirectory remoteMetadataDirectory = mock(RemoteDirectory.class); + RemoteStoreLockManager lockManager = mock(RemoteStoreLockManager.class); + ThreadPool threadPool = mock(ThreadPool.class); + ShardId shardId = new ShardId(new Index("test", "test"), 0); + + return new RemoteSegmentStoreDirectory(remoteDataDirectory, remoteMetadataDirectory, lockManager, threadPool, shardId); + } catch (IOException e) { + throw new RuntimeException("Failed to create mock RemoteSegmentStoreDirectory", e); + } + } + + /** + * Tests that uploading an empty collection of segments completes successfully + * without performing any actual upload operations. + * + * @throws Exception if the test fails + */ + public void testUploadSegmentsWithEmptyCollection() throws Exception { + Collection emptySegments = Collections.emptyList(); + Map segmentSizeMap = new HashMap<>(); + CountDownLatch latch = new CountDownLatch(1); + + ActionListener listener = ActionListener.wrap( + response -> latch.countDown(), + exception -> fail("Should not fail for empty segments") + ); + + uploaderService.uploadSegments(emptySegments, segmentSizeMap, listener, mockUploadListenerFunction, false); + + assertTrue(latch.await(1, TimeUnit.SECONDS)); + } + + /** + * Tests successful segment upload with low priority upload flag set to false. + * Verifies that segments are uploaded correctly and upload listeners are notified. 
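
The upload tests in this class all assert asynchronous completion the same way: the assertions run inside an ActionListener, a CountDownLatch is released on the expected path, and the test thread awaits the latch with a timeout. A stripped-down sketch of that pattern follows; the startUpload callback stands in for a call such as uploadSegments(...), and the helper name is illustrative.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.function.Consumer;

    import org.opensearch.core.action.ActionListener;

    // Sketch of the latch-based async assertion used throughout these tests.
    final class AsyncAssertionSketch {
        static void assertCompletes(Consumer<ActionListener<Void>> startUpload) throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(1);
            ActionListener<Void> listener = ActionListener.wrap(
                response -> latch.countDown(),                         // expected path releases the latch
                exception -> { throw new AssertionError(exception); }  // unexpected path, mirroring the fail(...) calls in these tests
            );
            startUpload.accept(listener);
            if (latch.await(5, TimeUnit.SECONDS) == false) {
                throw new AssertionError("upload did not complete within the timeout");
            }
        }
    }
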
+ * + * @throws Exception if the test fails + */ + public void testUploadSegmentsSuccessWithHighPriorityUpload() throws Exception { + Collection segments = Arrays.asList("segment1", "segment2"); + Map segmentSizeMap = new HashMap<>(); + segmentSizeMap.put("segment1", 100L); + segmentSizeMap.put("segment2", 200L); + + // Create a fresh mock IndexShard + IndexShard freshMockShard = mock(IndexShard.class); + ShardId shardId = new ShardId(new Index("test", "test"), 1); + when(freshMockShard.shardId()).thenReturn(shardId); + when(freshMockShard.state()).thenReturn(IndexShardState.STARTED); + + // Create a mock directory structure that matches what the code expects + Directory innerMockDelegate = mock(Directory.class); + FilterDirectory innerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerMockDelegate)); + + FilterDirectory outerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerFilterDirectory)); + + // Setup the real RemoteSegmentStoreDirectory to handle copyFrom calls + RemoteDirectory remoteDirectory = mock(RemoteDirectory.class); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDirectory, + mock(RemoteDirectory.class), + mock(RemoteStoreLockManager.class), + freshMockShard.getThreadPool(), + freshMockShard.shardId() + ); + + // Create a new uploader service with the fresh mocks + RemoteStoreUploaderService testUploaderService = new RemoteStoreUploaderService( + freshMockShard, + outerFilterDirectory, + remoteSegmentStoreDirectory + ); + + doAnswer(invocation -> { + ActionListener callback = invocation.getArgument(5); + callback.onResponse(null); + return true; + }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class)); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener listener = ActionListener.wrap( + response -> latch.countDown(), + exception -> fail("Upload should succeed: " + exception.getMessage()) + ); + + testUploaderService.uploadSegments(segments, segmentSizeMap, listener, mockUploadListenerFunction, false); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + // Verify the upload listener was called correctly + verify(mockUploadListener, times(2)).beforeUpload(any(String.class)); + verify(mockUploadListener, times(2)).onSuccess(any(String.class)); + } + + /** + * Tests successful segment upload with low priority upload flag set to true. + * Verifies that segments are uploaded correctly and upload listeners are notified. 
+ * + * @throws Exception if the test fails + */ + public void testUploadSegmentsSuccessWithLowPriorityUpload() throws Exception { + Collection segments = Arrays.asList("segment1", "segment2"); + Map segmentSizeMap = new HashMap<>(); + segmentSizeMap.put("segment1", 100L); + segmentSizeMap.put("segment2", 200L); + + // Create a fresh mock IndexShard + IndexShard freshMockShard = mock(IndexShard.class); + ShardId shardId = new ShardId(new Index("test", "test"), 1); + when(freshMockShard.shardId()).thenReturn(shardId); + when(freshMockShard.state()).thenReturn(IndexShardState.STARTED); + + // Create a mock directory structure that matches what the code expects + Directory innerMockDelegate = mock(Directory.class); + FilterDirectory innerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerMockDelegate)); + + FilterDirectory outerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerFilterDirectory)); + + // Setup the real RemoteSegmentStoreDirectory to handle copyFrom calls + RemoteDirectory remoteDirectory = mock(RemoteDirectory.class); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDirectory, + mock(RemoteDirectory.class), + mock(RemoteStoreLockManager.class), + freshMockShard.getThreadPool(), + freshMockShard.shardId() + ); + + // Create a new uploader service with the fresh mocks + RemoteStoreUploaderService testUploaderService = new RemoteStoreUploaderService( + freshMockShard, + outerFilterDirectory, + remoteSegmentStoreDirectory + ); + + doAnswer(invocation -> { + ActionListener callback = invocation.getArgument(5); + callback.onResponse(null); + return true; + }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class)); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener listener = ActionListener.wrap( + response -> latch.countDown(), + exception -> fail("Upload should succeed: " + exception.getMessage()) + ); + + testUploaderService.uploadSegments(segments, segmentSizeMap, listener, mockUploadListenerFunction, true); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + // Verify the upload listener was called correctly + verify(mockUploadListener, times(2)).beforeUpload(any(String.class)); + verify(mockUploadListener, times(2)).onSuccess(any(String.class)); + } + + /** + * Tests segment upload functionality when using a CompositeDirectory. + * Verifies that the afterSyncToRemote callback is invoked on the CompositeDirectory + * after successful upload operations. 
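
The successful-upload tests above, and the CompositeDirectory and corrupt-index tests below, all drive the asynchronous path by stubbing RemoteDirectory.copyFrom with doAnswer so that the listener argument is completed immediately, either successfully or, in the corrupt-index case, with the prepared failure. The idiom is condensed below; the argument position and copyFrom arity are taken from the stubs in these tests, and the wrapper class name is illustrative.

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import org.opensearch.core.action.ActionListener;
    import org.opensearch.index.store.RemoteDirectory;

    final class CopyFromStubSketch {
        // Returns a RemoteDirectory whose copyFrom immediately reports success to its listener.
        static RemoteDirectory succeedingRemoteDirectory() {
            RemoteDirectory remoteDirectory = mock(RemoteDirectory.class);
            doAnswer(invocation -> {
                ActionListener<Void> callback = invocation.getArgument(5);  // the listener is the sixth argument in these tests
                callback.onResponse(null);                                  // simulate a completed remote copy
                return true;
            }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class));
            return remoteDirectory;
        }
    }
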
+     *
+     * @throws Exception if the test fails
+     */
+    public void testUploadSegmentsWithCompositeDirectory() throws Exception {
+        Collection segments = Arrays.asList("segment1");
+        Map segmentSizeMap = new HashMap<>();
+        segmentSizeMap.put("segment1", 100L);
+
+        // Create a fresh mock IndexShard
+        IndexShard freshMockShard = mock(IndexShard.class);
+        ShardId shardId = new ShardId(new Index("test", "test"), 0);
+        when(freshMockShard.shardId()).thenReturn(shardId);
+
+        // Create a mock ShardRouting and set it as a field on the IndexShard mock
+        ShardRouting mockShardRouting = mock(ShardRouting.class);
+        freshMockShard.shardRouting = mockShardRouting;
+        when(mockShardRouting.primary()).thenReturn(true);
+
+        CompositeDirectory mockCompositeDirectory = mock(CompositeDirectory.class);
+        FilterDirectory innerFilterDirectory = new TestFilterDirectory(mockCompositeDirectory);
+        FilterDirectory outerFilterDirectory = new TestFilterDirectory(innerFilterDirectory);
+
+        // Setup the real RemoteSegmentStoreDirectory to handle copyFrom calls
+        RemoteDirectory remoteDirectory = mock(RemoteDirectory.class);
+        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(
+            remoteDirectory,
+            mock(RemoteDirectory.class),
+            mock(RemoteStoreLockManager.class),
+            freshMockShard.getThreadPool(),
+            freshMockShard.shardId()
+        );
+
+        // Create a new uploader service with the fresh mocks
+        RemoteStoreUploaderService testUploaderService = new RemoteStoreUploaderService(
+            freshMockShard,
+            outerFilterDirectory,
+            remoteSegmentStoreDirectory
+        );
+
+        // Stub copyFrom on the mocked RemoteDirectory to invoke the success callback
+        doAnswer(invocation -> {
+            ActionListener callback = invocation.getArgument(5);
+            callback.onResponse(null);
+            return true;
+        }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class));
+
+        CountDownLatch latch = new CountDownLatch(1);
+
+        ActionListener listener = ActionListener.wrap(
+            response -> latch.countDown(),
+            exception -> fail("Upload should succeed: " + exception.getMessage())
+        );
+
+        testUploaderService.uploadSegments(segments, segmentSizeMap, listener, mockUploadListenerFunction, false);
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        verify(mockCompositeDirectory).afterSyncToRemote("segment1");
+    }
+
+    /**
+     * Tests error handling when a CorruptIndexException occurs during segment upload.
+     * Verifies that the shard is failed with the appropriate error message
+     * and the upload listener is notified of the failure.
+     *
+     * @throws Exception if the test fails
+     */
+    public void testUploadSegmentsWithCorruptIndexException() throws Exception {
+        Collection segments = Arrays.asList("segment1");
+        Map segmentSizeMap = new HashMap<>();
+        segmentSizeMap.put("segment1", 100L);
+
+        // Create a fresh mock IndexShard
+        IndexShard freshMockShard = mock(IndexShard.class);
+        ShardId shardId = new ShardId(new Index("test", "test"), 0);
+        when(freshMockShard.shardId()).thenReturn(shardId);
+
+        // Create a mock ShardRouting and set it as a field on the IndexShard mock
+        ShardRouting mockShardRouting = mock(ShardRouting.class);
+        freshMockShard.shardRouting = mockShardRouting;
+        when(mockShardRouting.primary()).thenReturn(true);
+
+        Directory innerMockDelegate = mock(Directory.class);
+        FilterDirectory innerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerMockDelegate));
+
+        FilterDirectory outerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerFilterDirectory));
+
+        // Setup the real RemoteSegmentStoreDirectory to handle copyFrom calls
+        RemoteDirectory remoteDirectory = mock(RemoteDirectory.class);
+        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(
+            remoteDirectory,
+            mock(RemoteDirectory.class),
+            mock(RemoteStoreLockManager.class),
+            freshMockShard.getThreadPool(),
+            freshMockShard.shardId()
+        );
+
+        // Create a new uploader service with the fresh mocks
+        RemoteStoreUploaderService testUploaderService = new RemoteStoreUploaderService(
+            freshMockShard,
+            outerFilterDirectory,
+            remoteSegmentStoreDirectory
+        );
+
+        CorruptIndexException corruptException = new CorruptIndexException("Index corrupted", "test");
+        CountDownLatch latch = new CountDownLatch(1);
+
+        // Stub copyFrom on the mocked RemoteDirectory to report a corruption failure
+        doAnswer(invocation -> {
+            ActionListener callback = invocation.getArgument(5);
+            callback.onFailure(corruptException);
+            return true;
+        }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class));
+
+        ActionListener listener = ActionListener.wrap(response -> fail("Should not succeed with corrupt index"), exception -> {
+            assertEquals(corruptException, exception);
+            latch.countDown();
+        });
+
+        testUploaderService.uploadSegments(segments, segmentSizeMap, listener, mockUploadListenerFunction, false);
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        verify(freshMockShard).failShard(eq("Index corrupted (resource=test)"), eq(corruptException));
+        verify(mockUploadListener).onFailure("segment1");
+    }
+
+    /**
+     * Tests error handling when a generic RuntimeException occurs during segment upload.
+     * Verifies that the shard is NOT failed (unlike CorruptIndexException)
+     * but the upload listener is still notified of the failure.
+     *
+     * @throws Exception if the test fails
+     */
+    public void testUploadSegmentsWithGenericException() throws Exception {
+        Collection segments = Arrays.asList("segment1");
+        Map segmentSizeMap = new HashMap<>();
+        segmentSizeMap.put("segment1", 100L);
+
+        // Create a fresh mock IndexShard
+        IndexShard freshMockShard = mock(IndexShard.class);
+        ShardId shardId = new ShardId(new Index("test", "test"), 0);
+        when(freshMockShard.shardId()).thenReturn(shardId);
+
+        // Create a mock ShardRouting and set it as a field on the IndexShard mock
+        ShardRouting mockShardRouting = mock(ShardRouting.class);
+        freshMockShard.shardRouting = mockShardRouting;
+        when(mockShardRouting.primary()).thenReturn(true);
+
+        Directory innerMockDelegate = mock(Directory.class);
+        FilterDirectory innerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerMockDelegate));
+
+        FilterDirectory outerFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(innerFilterDirectory));
+
+        // Setup the real RemoteSegmentStoreDirectory to handle copyFrom calls
+        RemoteDirectory remoteDirectory = mock(RemoteDirectory.class);
+        RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(
+            remoteDirectory,
+            mock(RemoteDirectory.class),
+            mock(RemoteStoreLockManager.class),
+            freshMockShard.getThreadPool(),
+            freshMockShard.shardId()
+        );
+
+        // Create a new uploader service with the fresh mocks
+        RemoteStoreUploaderService testUploaderService = new RemoteStoreUploaderService(
+            freshMockShard,
+            outerFilterDirectory,
+            remoteSegmentStoreDirectory
+        );
+
+        RuntimeException genericException = new RuntimeException("Generic error");
+        CountDownLatch latch = new CountDownLatch(1);
+
+        // Stub copyFrom on the mocked RemoteDirectory to report a generic failure
+        doAnswer(invocation -> {
+            ActionListener callback = invocation.getArgument(5);
+            callback.onFailure(genericException);
+            return true;
+        }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(), any(Boolean.class));
+
+        ActionListener listener = ActionListener.wrap(response -> fail("Should not succeed with generic exception"), exception -> {
+            assertEquals(genericException, exception);
+            latch.countDown();
+        });
+
+        testUploaderService.uploadSegments(segments, segmentSizeMap, listener, mockUploadListenerFunction, false);
+
+        assertTrue(latch.await(5, TimeUnit.SECONDS));
+        verify(freshMockShard, never()).failShard(any(), any());
+        verify(mockUploadListener).onFailure("segment1");
+    }
+
+    /**
+     * Test implementation of FilterDirectory used for creating nested directory structures
+     * in tests. This class simply delegates all operations to the wrapped directory.
+     */
+    public static class TestFilterDirectory extends FilterDirectory {
+
+        /**
+         * Creates a new TestFilterDirectory wrapping the given directory.
+ * + * @param in the directory to wrap + */ + public TestFilterDirectory(Directory in) { + super(in); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java index 531bca97df662..7ee00712a236d 100644 --- a/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/CompositeDirectoryTests.java @@ -132,6 +132,12 @@ public void testSync() throws IOException { // All the files in the below list are present either locally or on remote, so sync should work as expected Collection names = List.of("_0.cfe", "_0.cfs", "_0.si", "_1.cfe", "_2.cfe", "segments_1"); compositeDirectory.sync(names); + // Deleting file _1.cfe and then adding its blocks locally so that full file is not present but block files are present in local + // State of _1.cfe file after these operations - not present in remote, full file not present locally but blocks present in local + compositeDirectory.deleteFile("_1.cfe"); + addFilesToDirectory(new String[] { "_1.cfe_block_0", "_1.cfe_block_2" }); + // Sync should work as expected since blocks are present in local + compositeDirectory.sync(List.of("_1.cfe")); // Below list contains a non-existent file, hence will throw an error Collection names1 = List.of("_0.cfe", "_0.cfs", "_0.si", "_1.cfe", "_2.cfe", "segments_1", "non_existent_file"); assertThrows(NoSuchFileException.class, () -> compositeDirectory.sync(names1)); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 643caa85b5862..3e4be7ef5c294 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store.remote.filecache; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -18,6 +20,7 @@ import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory; +import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.utils.FileTypeUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -28,6 +31,7 @@ import java.nio.file.attribute.PosixFilePermissions; import java.util.List; +@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class FileCacheTests extends OpenSearchTestCase { // need concurrency level to be static to make these tests more deterministic because capacity per segment is dependent on // (total capacity) / (concurrency level) so having high concurrency level might trigger early evictions which is tolerable in real life @@ -46,6 +50,10 @@ private FileCache createFileCache(long capacity) { return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL, new NoopCircuitBreaker(CircuitBreaker.REQUEST)); } + private FileCache createFileCache(long capacity, CircuitBreaker circuitBreaker) { + return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL, circuitBreaker); + } + private FileCache 
createCircuitBreakingFileCache(long capacity) { TestCircuitBreaker testCircuitBreaker = new TestCircuitBreaker(); testCircuitBreaker.startBreaking(); @@ -200,6 +208,20 @@ public void testComputeThrowCircuitBreakingException() { assertNull(fileCache.get(path)); } + public void testEntryNotRemovedCircuitBreaker() { + TestCircuitBreaker circuitBreaker = new TestCircuitBreaker(); + FileCache fileCache = createFileCache(MEGA_BYTES, circuitBreaker); + Path path = createPath("0"); + fileCache.put(path, new StubCachedIndexInput(8 * MEGA_BYTES)); + // put should succeed since circuit breaker hasn't tripped yet + assertEquals(fileCache.get(path).length(), 8 * MEGA_BYTES); + circuitBreaker.startBreaking(); + // compute should throw CircuitBreakingException but shouldn't remove entry already present + assertThrows(CircuitBreakingException.class, () -> fileCache.compute(path, (p, i) -> new StubCachedIndexInput(2 * MEGA_BYTES))); + assertNotNull(fileCache.get(path)); + assertEquals(fileCache.get(path).length(), 8 * MEGA_BYTES); + } + public void testRemove() { FileCache fileCache = createFileCache(MEGA_BYTES); for (int i = 0; i < 4; i++) { diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java index ce0a4d7bf3c02..41e76d0b762ea 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCachedIndexInputTests.java @@ -8,18 +8,22 @@ package org.opensearch.index.store.remote.filecache; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; import java.io.IOException; import java.nio.file.Path; +@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class FileCachedIndexInputTests extends OpenSearchTestCase { protected FileCache fileCache; diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInputTests.java index 7fb7a03584e20..bc646cc8d50db 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FullFileCachedIndexInputTests.java @@ -8,17 +8,22 @@ package org.opensearch.index.store.remote.filecache; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.IndexInput; +import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import java.io.IOException; +import java.util.concurrent.TimeUnit; +@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class FullFileCachedIndexInputTests extends FileCachedIndexInputTests { private FullFileCachedIndexInput fullFileCachedIndexInput; @Override protected void setupIndexInputAndAddToFileCache() { 
fullFileCachedIndexInput = new FullFileCachedIndexInput(fileCache, filePath, underlyingIndexInput); + // Putting in the file cache would increase refCount to 1 fileCache.put(filePath, new CachedFullFileIndexInput(fileCache, filePath, fullFileCachedIndexInput)); } @@ -37,15 +42,11 @@ public void testClone() throws IOException { fileCache.decRef(filePath); assertFalse(isActiveAndTotalUsageSame()); - // After cloning the refCount will increase again and activeUsage and totalUsage will be same again - FileCachedIndexInput clonedFileCachedIndexInput1 = fullFileCachedIndexInput.clone(); - FileCachedIndexInput clonedFileCachedIndexInput2 = clonedFileCachedIndexInput1.clone(); - FileCachedIndexInput clonedFileCachedIndexInput3 = clonedFileCachedIndexInput2.clone(); - assertTrue(isActiveAndTotalUsageSame()); + // Since no clones have been done, refCount should be zero + assertEquals((int) fileCache.getRef(filePath), 0); - // closing the first level clone will close all subsequent level clones and reduce ref count to 0 - clonedFileCachedIndexInput1.close(); - assertFalse(isActiveAndTotalUsageSame()); + createUnclosedClonesSlices(false); + triggerGarbageCollectionAndAssertClonesClosed(); fileCache.prune(); @@ -68,12 +69,38 @@ public void testSlice() throws IOException { fileCache.decRef(filePath); assertFalse(isActiveAndTotalUsageSame()); - // Creating a slice will increase the refCount - IndexInput slicedFileCachedIndexInput = fullFileCachedIndexInput.slice(SLICE_DESC, 1, 2); - assertTrue(isActiveAndTotalUsageSame()); + // Since no clones have been done, refCount should be zero + assertEquals((int) fileCache.getRef(filePath), 0); + + createUnclosedClonesSlices(true); + triggerGarbageCollectionAndAssertClonesClosed(); - // Closing the parent will close all the slices as well decreasing the refCount to 0 - fullFileCachedIndexInput.close(); assertFalse(isActiveAndTotalUsageSame()); } + + private void triggerGarbageCollectionAndAssertClonesClosed() { + try { + // Clones/Slices will be phantom reachable now, triggering gc should call close on them + assertBusy(() -> { + System.gc(); // Do not rely on GC to be deterministic, hence the polling + assertEquals( + "Expected refCount to drop to zero as all clones/slices should have closed", + (int) fileCache.getRef(filePath), + 0 + ); + }, 5, TimeUnit.SECONDS); + } catch (Exception e) { + logger.error("Exception thrown while triggering gc", e); + fail(); + } + } + + private void createUnclosedClonesSlices(boolean createSlice) throws IOException { + int NUM_OF_CLONES = 3; + for (int i = 0; i < NUM_OF_CLONES; i++) { + if (createSlice) fullFileCachedIndexInput.slice("slice", 1, 2); + else fullFileCachedIndexInput.clone(); + } + assertEquals((int) fileCache.getRef(filePath), NUM_OF_CLONES); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index f9c9b451d3c77..f8dc1c3dc971d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -625,6 +625,7 @@ public void testStartReplicationListenerSuccess() throws InterruptedException { latch.await(2, TimeUnit.SECONDS); verify(spy, (atLeastOnce())).updateVisibleCheckpoint(eq(0L), eq(replicaShard)); + verify(spy, times(1)).processLatestReceivedCheckpoint(any(), any()); } public void 
testStartReplicationListenerFailure() throws InterruptedException { @@ -851,4 +852,5 @@ public void testProcessCheckpointOnClusterStateUpdate() { spy.clusterChanged(new ClusterChangedEvent("ignored", oldState, newState)); verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); } + } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java index 38f1c59bd5b68..a487a0f9a6032 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java @@ -21,6 +21,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; +import org.opensearch.common.time.DateUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.ReplicationStats; @@ -38,6 +39,7 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.time.Instant; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -215,10 +217,10 @@ public void testGetSegmentReplicationStats_WhenNoReplication() { assertEquals(0, replicationStats.maxBytesBehind); } - public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefreshedToNewCheckPoint() { + public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefreshedToNewCheckPoint() throws InterruptedException { ShardId shardId = new ShardId("index", "uuid", 0); ReplicationCheckpoint firstReplicationCheckpoint = ReplicationCheckpoint.empty(shardId); - + long baseTime = DateUtils.toLong(Instant.now()); StoreFileMetadata storeFileMetadata1 = new StoreFileMetadata("test-1", 500, "1", Version.LATEST, new BytesRef(500)); StoreFileMetadata storeFileMetadata2 = new StoreFileMetadata("test-2", 500, "1", Version.LATEST, new BytesRef(500)); Map stringStoreFileMetadataMapOne = new HashMap<>(); @@ -232,7 +234,7 @@ public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefr 1000, "", stringStoreFileMetadataMapOne, - System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + baseTime - 5_000_000 ); IndexShard replicaShard = mock(IndexShard.class); @@ -260,7 +262,7 @@ public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefr 200, "", stringStoreFileMetadataMapTwo, - System.nanoTime() - TimeUnit.MINUTES.toNanos(1) + baseTime - 1_000_000 ); segmentReplicator.updateReplicationCheckpointStats(thirdReplicationCheckpoint, replicaShard); @@ -276,6 +278,16 @@ public void testGetSegmentReplicationStats_WhileOnGoingReplicationAndPrimaryRefr assertEquals(200, replicationStatsSecond.totalBytesBehind); assertEquals(200, replicationStatsSecond.maxBytesBehind); assertTrue(replicationStatsSecond.maxReplicationLag > 0); + + // shard finished syncing to last checkpoint (sis 3) + when(replicaShard.getLatestReplicationCheckpoint()).thenReturn(thirdReplicationCheckpoint); + segmentReplicator.pruneCheckpointsUpToLastSync(replicaShard); + ReplicationStats finalStats = segmentReplicator.getSegmentReplicationStats(shardId); + assertEquals(0, finalStats.totalBytesBehind); + assertEquals(0, finalStats.maxBytesBehind); + assertEquals(0, finalStats.maxReplicationLag); + // shard is up to date, should not have any tracked stats + 
assertTrue(segmentReplicator.replicationCheckpointStats.get(shardId).isEmpty()); } public void testGetSegmentReplicationStats_WhenCheckPointReceivedOutOfOrder() { diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java index f02386c5e0d10..1103062379a48 100644 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java @@ -8,13 +8,19 @@ package org.opensearch.search.approximate; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.document.Document; +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.FloatPoint; +import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.sandbox.document.BigIntegerPoint; +import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; @@ -29,54 +35,281 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collection; +import java.util.function.Function; -import static java.util.Arrays.asList; -import static org.apache.lucene.document.LongPoint.pack; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class ApproximatePointRangeQueryTests extends OpenSearchTestCase { - protected static final String DATE_FIELD_NAME = "mapped_date"; + private final NumericType numericType; + + public ApproximatePointRangeQueryTests(NumericType numericType) { + this.numericType = numericType; + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[][] { + { NumericType.INT }, + { NumericType.LONG }, + { NumericType.HALF_FLOAT }, + { NumericType.FLOAT }, + { NumericType.DOUBLE }, + { NumericType.UNSIGNED_LONG } } + ); + } + + enum NumericType { + INT("int_field", ApproximatePointRangeQuery.INT_FORMAT) { + @Override + byte[] encode(Number value) { + return IntPoint.pack(new int[] { value.intValue() }).bytes; + } + + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new IntPoint(fieldName, value.intValue())); + } + + @Override + void addDocValuesField(Document doc, String fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName, value.intValue())); + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return IntPoint.newRangeQuery(fieldName, lower.intValue(), upper.intValue()); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.INT; + } + }, + LONG("long_field", ApproximatePointRangeQuery.LONG_FORMAT) { + @Override + byte[] encode(Number value) { + return LongPoint.pack(new long[] { value.longValue() }).bytes; + } + + // Add to BKD + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new LongPoint(fieldName, value.longValue())); + } + + // Add DocValues + @Override + void addDocValuesField(Document doc, String 
fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName, value.longValue())); + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return LongPoint.newRangeQuery(fieldName, lower.longValue(), upper.longValue()); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.LONG; + } + }, + HALF_FLOAT("half_float_field", ApproximatePointRangeQuery.HALF_FLOAT_FORMAT) { + @Override + byte[] encode(Number value) { + byte[] bytes = new byte[HalfFloatPoint.BYTES]; + HalfFloatPoint.encodeDimension(value.floatValue(), bytes, 0); + return bytes; + } + + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new HalfFloatPoint(fieldName, value.floatValue())); + } + + @Override + void addDocValuesField(Document doc, String fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName + "_sort", value.longValue())); + } + + @Override + String getSortFieldName() { + return fieldName + "_sort"; + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return HalfFloatPoint.newRangeQuery(fieldName, lower.floatValue(), upper.floatValue()); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.LONG; + } + }, + FLOAT("float_field", ApproximatePointRangeQuery.FLOAT_FORMAT) { + @Override + byte[] encode(Number value) { + return FloatPoint.pack(new float[] { value.floatValue() }).bytes; + } + + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new FloatPoint(fieldName, value.floatValue())); + } + + @Override + void addDocValuesField(Document doc, String fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName, Float.floatToIntBits(value.floatValue()))); + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return FloatPoint.newRangeQuery(fieldName, lower.floatValue(), upper.floatValue()); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.FLOAT; + } + }, + DOUBLE("double_field", ApproximatePointRangeQuery.DOUBLE_FORMAT) { + @Override + byte[] encode(Number value) { + return DoublePoint.pack(new double[] { value.doubleValue() }).bytes; + } + + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new DoublePoint(fieldName, value.doubleValue())); + } + + @Override + void addDocValuesField(Document doc, String fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName, Double.doubleToLongBits(value.doubleValue()))); + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return DoublePoint.newRangeQuery(fieldName, lower.doubleValue(), upper.doubleValue()); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.DOUBLE; + } + }, + UNSIGNED_LONG("unsigned_long_field", ApproximatePointRangeQuery.UNSIGNED_LONG_FORMAT) { + @Override + byte[] encode(Number value) { + byte[] bytes = new byte[BigIntegerPoint.BYTES]; + BigIntegerPoint.encodeDimension(BigInteger.valueOf(value.longValue()), bytes, 0); + return bytes; + } + + @Override + void addField(Document doc, String fieldName, Number value) { + doc.add(new BigIntegerPoint(fieldName, BigInteger.valueOf(value.longValue()))); + } + + @Override + void addDocValuesField(Document doc, String fieldName, Number value) { + doc.add(new NumericDocValuesField(fieldName + "_sort", value.longValue())); + } + + @Override + Query rangeQuery(String fieldName, Number lower, Number upper) { + return 
BigIntegerPoint.newRangeQuery( + fieldName, + BigInteger.valueOf(lower.longValue()), + BigInteger.valueOf(upper.longValue()) + ); + } + + @Override + SortField.Type getSortFieldType() { + return SortField.Type.LONG; + } + + @Override + String getSortFieldName() { + return fieldName + "_sort"; + } + }; + + final String fieldName; + final Function format; + + NumericType(String fieldName, Function format) { + this.fieldName = fieldName; + this.format = format; + } + + abstract byte[] encode(Number value); + + abstract void addField(Document doc, String fieldName, Number value); + + abstract void addDocValuesField(Document doc, String fieldName, Number value); + + abstract Query rangeQuery(String fieldName, Number lower, Number upper); + + abstract SortField.Type getSortFieldType(); + + String getSortFieldName() { + return fieldName; + } + + long getMinTestValue() { + return this == UNSIGNED_LONG ? 0 : -100; + } + } public void testApproximateRangeEqualsActualRange() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; - for (int i = 0; i < 100; i++) { - int numPoints = RandomNumbers.randomIntBetween(random(), 1, 10); + int numDocs = RandomNumbers.randomIntBetween(random(), 1500, 3000); + for (int i = 0; i < numDocs; i++) { + int numValues = RandomNumbers.randomIntBetween(random(), 1, 10); Document doc = new Document(); - for (int j = 0; j < numPoints; j++) { - for (int v = 0; v < dims; v++) { - scratch[v] = RandomNumbers.randomLongBetween(random(), 0, 100); - } - doc.add(new LongPoint("point", scratch)); + for (int j = 0; j < numValues; j++) { + long randomValue = RandomNumbers.randomLongBetween(random(), 0, 200); + numericType.addField(doc, numericType.fieldName, randomValue); } iw.addDocument(doc); + if (random().nextInt(20) == 0) { + iw.flush(); + } } iw.flush(); + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - try { - long lower = RandomNumbers.randomLongBetween(random(), -100, 200); - long upper = lower + RandomNumbers.randomLongBetween(random(), 0, 100); - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - ApproximatePointRangeQuery.LONG_FORMAT - ); - Query query = LongPoint.newRangeQuery("point", lower, upper); - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, 10); - TopDocs topDocs1 = searcher.search(query, 10); - assertEquals(topDocs.totalHits, topDocs1.totalHits); - } catch (IOException e) { - throw new RuntimeException(e); - } - + long lower = RandomNumbers.randomLongBetween(random(), numericType.getMinTestValue(), 200); + long upper = RandomNumbers.randomLongBetween(random(), lower, lower + 150); + int searchSize = RandomNumbers.randomIntBetween(random(), 10, 50); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + numericType.format + ); + Query exactQuery = numericType.rangeQuery(numericType.fieldName, lower, upper); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(approximateQuery, searchSize); + TopDocs topDocs1 = searcher.search(exactQuery, searchSize); + assertEquals( + "Approximate and exact queries should return same number of docs", + topDocs1.scoreDocs.length, + topDocs.scoreDocs.length + ); } 
} } @@ -86,283 +319,297 @@ public void testApproximateRangeWithDefaultSize() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; int numPoints = 1000; for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - for (int v = 0; v < dims; v++) { - scratch[v] = i; - } - doc.add(new LongPoint("point", scratch)); + numericType.addField(doc, numericType.fieldName, i); iw.addDocument(doc); if (i % 15 == 0) iw.flush(); } iw.flush(); try (IndexReader reader = iw.getReader()) { - try { - long lower = 0; - long upper = 1000; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - ApproximatePointRangeQuery.LONG_FORMAT - ); - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, 10); - assertEquals(topDocs.totalHits, new TotalHits(1000, TotalHits.Relation.EQUAL_TO)); - } catch (IOException e) { - throw new RuntimeException(e); - } - + long lower = 0; + long upper = 1000; + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + numericType.format + ); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(approximateQuery, 10); + assertEquals(topDocs.totalHits, new TotalHits(1000, TotalHits.Relation.EQUAL_TO)); } } } } - public void testApproximateRangeWithSizeDefault() throws IOException { + public void testApproximateRangeShortCircuitAscSort() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; - int numPoints = 1000; + int numPoints = RandomNumbers.randomIntBetween(random(), 1000, 3000); for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - for (int v = 0; v < dims; v++) { - scratch[v] = i; - } - doc.add(new LongPoint("point", scratch)); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); iw.addDocument(doc); + // Randomly flush + if (random().nextInt(20) == 0) { + iw.flush(); + } } iw.flush(); - iw.forceMerge(1); - final int size = 10; + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - try { - long lower = 0; - long upper = 45; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - size, - null, - ApproximatePointRangeQuery.LONG_FORMAT - ); - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, size); - assertEquals(size, topDocs.scoreDocs.length); - } catch (IOException e) { - throw new RuntimeException(e); + long lower = RandomNumbers.randomLongBetween(random(), 0, 50); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, Math.min(lower + 100, numPoints - 1)); + final int size = RandomNumbers.randomIntBetween(random(), 5, 20); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + size, + SortOrder.ASC, + numericType.format + ); + Query exactQuery = numericType.rangeQuery(numericType.fieldName, lower, upper); + IndexSearcher searcher = new 
IndexSearcher(reader); + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType())); + TopDocs topDocs = searcher.search(approximateQuery, size, sort); + TopDocs topDocs1 = searcher.search(exactQuery, size, sort); + int compareSize = Math.min(size, Math.min(topDocs.scoreDocs.length, topDocs1.scoreDocs.length)); + for (int i = 0; i < compareSize; i++) { + assertEquals("Mismatch at doc index " + i, topDocs.scoreDocs[i].doc, topDocs1.scoreDocs[i].doc); } - } } } } - public void testApproximateRangeWithSizeOverDefault() throws IOException { + public void testApproximateRangeShortCircuitDescSort() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; - int numPoints = 15000; + int numPoints = RandomNumbers.randomIntBetween(random(), 1000, 3000); for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - for (int v = 0; v < dims; v++) { - scratch[v] = i; - } - doc.add(new LongPoint("point", scratch)); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); iw.addDocument(doc); + if (random().nextInt(20) == 0) { + iw.flush(); + } } iw.flush(); - iw.forceMerge(1); + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - try { - long lower = 0; - long upper = 12000; - long maxHits = 12001; - final int size = 11000; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - size, - null, - ApproximatePointRangeQuery.LONG_FORMAT - ); - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, size); - - if (topDocs.totalHits.relation() == Relation.EQUAL_TO) { - assertEquals(topDocs.totalHits.value(), size); - } else { - assertTrue(11000 <= topDocs.totalHits.value()); - assertTrue(maxHits >= topDocs.totalHits.value()); - } - } catch (IOException e) { - throw new RuntimeException(e); + long lower = RandomNumbers.randomLongBetween(random(), numPoints - 100, numPoints - 20); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, numPoints - 1); + final int size = RandomNumbers.randomIntBetween(random(), 5, 20); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + size, + SortOrder.DESC, + numericType.format + ); + Query exactQuery = numericType.rangeQuery(numericType.fieldName, lower, upper); + IndexSearcher searcher = new IndexSearcher(reader); + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); + TopDocs topDocs = searcher.search(approximateQuery, size, sort); + TopDocs topDocs1 = searcher.search(exactQuery, size, sort); + int compareSize = Math.min(size, Math.min(topDocs.scoreDocs.length, topDocs1.scoreDocs.length)); + for (int i = 0; i < compareSize; i++) { + assertEquals("Mismatch at doc index " + i, topDocs.scoreDocs[i].doc, topDocs1.scoreDocs[i].doc); } - } } } } - public void testApproximateRangeShortCircuit() throws IOException { + public void testApproximateRangeWithSizeDefault() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; - int 
numPoints = 1000; + int numPoints = RandomNumbers.randomIntBetween(random(), 1000, 3000); for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - for (int v = 0; v < dims; v++) { - scratch[v] = i; - } - doc.add(new LongPoint("point", scratch)); + numericType.addField(doc, numericType.fieldName, i); iw.addDocument(doc); - if (i % 10 == 0) iw.flush(); + if (random().nextInt(20) == 0) { + iw.flush(); + } } iw.flush(); - iw.forceMerge(1); + if (random().nextBoolean()) { + iw.forceMerge(1); + } + final int size = RandomNumbers.randomIntBetween(random(), 5, 20); try (IndexReader reader = iw.getReader()) { - try { - long lower = 0; - long upper = 100; - final int size = 10; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - size, - null, - ApproximatePointRangeQuery.LONG_FORMAT - ); - Query query = LongPoint.newRangeQuery("point", lower, upper); - - IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, size); - TopDocs topDocs1 = searcher.search(query, size); - assertEquals(size, topDocs.scoreDocs.length); - assertEquals(size, topDocs1.scoreDocs.length); - assertEquals(topDocs1.totalHits.value(), 101); - } catch (IOException e) { - throw new RuntimeException(e); - } - + long lower = RandomNumbers.randomLongBetween(random(), 0, numPoints / 2); + long upper = RandomNumbers.randomLongBetween(random(), lower + size * 2, numPoints - 1); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + size, + null, + numericType.format + ); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(approximateQuery, size); + assertEquals(size, topDocs.scoreDocs.length); } } } } - public void testApproximateRangeShortCircuitAscSort() throws IOException { + public void testApproximateRangeWithSizeOverDefault() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - - long[] scratch = new long[dims]; - int numPoints = 1000; + int numPoints = RandomNumbers.randomIntBetween(random(), 15000, 20000); for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - for (int v = 0; v < dims; v++) { - scratch[v] = i; + numericType.addField(doc, numericType.fieldName, i); + iw.addDocument(doc); + if (random().nextInt(100) == 0) { + iw.flush(); } - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); } iw.flush(); - iw.forceMerge(1); + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - try { - long lower = 0; - long upper = 20; - final int size = 10; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - size, - SortOrder.ASC, - ApproximatePointRangeQuery.LONG_FORMAT - ); - Query query = LongPoint.newRangeQuery("point", lower, upper); - - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG)); - TopDocs topDocs = searcher.search(approximateQuery, size, sort); - TopDocs topDocs1 = searcher.search(query, size, sort); - - assertEquals(topDocs.scoreDocs[0].doc, topDocs1.scoreDocs[0].doc); - assertEquals(topDocs.scoreDocs[1].doc, topDocs1.scoreDocs[1].doc); - assertEquals(topDocs.scoreDocs[2].doc, 
topDocs1.scoreDocs[2].doc); - assertEquals(topDocs.scoreDocs[3].doc, topDocs1.scoreDocs[3].doc); - assertEquals(topDocs.scoreDocs[4].doc, topDocs1.scoreDocs[4].doc); - assertEquals(topDocs.scoreDocs[5].doc, topDocs1.scoreDocs[5].doc); - - } catch (IOException e) { - throw new RuntimeException(e); + final int size = RandomNumbers.randomIntBetween(random(), 11000, 13000); + long lower = RandomNumbers.randomLongBetween(random(), 0, 1000); + long upper = RandomNumbers.randomLongBetween(random(), lower + size, Math.min(lower + size + 2000, numPoints - 1)); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + size, + null, + numericType.format + ); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(approximateQuery, size); + if (topDocs.totalHits.relation() == Relation.EQUAL_TO) { + assertEquals(topDocs.totalHits.value(), size); + } else { + assertTrue(size <= topDocs.totalHits.value()); } + } + } + } + } + public void testApproximateRangeShortCircuit() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { + int dims = 1; + int numPoints = RandomNumbers.randomIntBetween(random(), 3000, 10000); + for (int i = 0; i < numPoints; i++) { + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, i); + iw.addDocument(doc); + if (random().nextInt(20) == 0) { + iw.flush(); + } + } + iw.flush(); + if (random().nextBoolean()) { + iw.forceMerge(1); + } + try (IndexReader reader = iw.getReader()) { + final int size = RandomNumbers.randomIntBetween(random(), 5, 20); + long lower = RandomNumbers.randomLongBetween(random(), 0, numPoints / 2); + long upper = RandomNumbers.randomLongBetween(random(), lower + size * 2, Math.min(lower + 200, numPoints - 1)); + Query approximateQuery = new ApproximatePointRangeQuery( + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), + dims, + size, + null, + numericType.format + ); + Query exactQuery = numericType.rangeQuery(numericType.fieldName, lower, upper); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs exactAllDocs = searcher.search(exactQuery, numPoints); + long actualHitsInRange = exactAllDocs.totalHits.value(); + TopDocs topDocs = searcher.search(approximateQuery, size); + TopDocs topDocs1 = searcher.search(exactQuery, size); + assertEquals(size, topDocs.scoreDocs.length); + assertEquals(size, topDocs1.scoreDocs.length); + assertEquals(actualHitsInRange, topDocs1.totalHits.value()); + assertTrue("Approximate query should find at least 'size' documents", topDocs.totalHits.value() >= size); } } } } public void testSize() { + long lower = RandomNumbers.randomLongBetween(random(), 0, 100); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, lower + 1000); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(0).bytes, - pack(20).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), 1, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); assertEquals(query.getSize(), 10_000); - - query.setSize(100); - assertEquals(query.getSize(), 100); - + int newSize = RandomNumbers.randomIntBetween(random(), 50, 500); + query.setSize(newSize); + assertEquals(query.getSize(), newSize); } public void testSortOrder() { + long lower = RandomNumbers.randomLongBetween(random(), 0, 
100); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, lower + 1000); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(0).bytes, - pack(20).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), 1, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); assertNull(query.getSortOrder()); - - query.setSortOrder(SortOrder.ASC); - assertEquals(query.getSortOrder(), SortOrder.ASC); + SortOrder sortOrder = random().nextBoolean() ? SortOrder.ASC : SortOrder.DESC; + query.setSortOrder(sortOrder); + assertEquals(query.getSortOrder(), sortOrder); } public void testCanApproximate() { + long lower = RandomNumbers.randomLongBetween(random(), 0, 100); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, lower + 1000); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(0).bytes, - pack(20).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), 1, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - assertFalse(query.canApproximate(null)); - ApproximatePointRangeQuery queryCanApproximate = new ApproximatePointRangeQuery( - "point", - pack(0).bytes, - pack(20).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), 1, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ) { public boolean canApproximate(SearchContext context) { return true; @@ -373,70 +620,29 @@ public boolean canApproximate(SearchContext context) { } public void testCannotApproximateWithTrackTotalHits() { + long lower = RandomNumbers.randomLongBetween(random(), 0, 100); + long upper = RandomNumbers.randomLongBetween(random(), lower + 10, lower + 1000); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(0).bytes, - pack(20).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), 1, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - SearchContext mockContext = mock(SearchContext.class); + SearchContext mockContext = mock(org.opensearch.search.internal.SearchContext.class); when(mockContext.trackTotalHitsUpTo()).thenReturn(SearchContext.TRACK_TOTAL_HITS_ACCURATE); assertFalse(query.canApproximate(mockContext)); - when(mockContext.trackTotalHitsUpTo()).thenReturn(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO); + int trackTotalHitsUpTo = RandomNumbers.randomIntBetween(random(), 1000, 20000); + when(mockContext.trackTotalHitsUpTo()).thenReturn(trackTotalHitsUpTo); when(mockContext.aggregations()).thenReturn(null); - when(mockContext.from()).thenReturn(0); - when(mockContext.size()).thenReturn(10); + int from = RandomNumbers.randomIntBetween(random(), 0, 100); + int size = RandomNumbers.randomIntBetween(random(), 10, 100); + when(mockContext.from()).thenReturn(from); + when(mockContext.size()).thenReturn(size); when(mockContext.request()).thenReturn(null); assertTrue(query.canApproximate(mockContext)); - } - - public void testApproximateRangeShortCircuitDescSort() throws IOException { - try (Directory directory = newDirectory()) { - try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { - int dims = 1; - long[] scratch = new long[dims]; - int numPoints = 1000; - for (int i = 0; i < numPoints; i++) { - for (int v = 0; v < dims; v++) { - scratch[v] = i; - } - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); - } - iw.flush(); - 
iw.forceMerge(1); - try (IndexReader reader = iw.getReader()) { - try { - long lower = 980; - long upper = 999; - final int size = 10; - Query approximateQuery = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, - dims, - size, - SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT - ); - Query query = LongPoint.newRangeQuery("point", lower, upper); - - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG, true)); // true for DESC - TopDocs topDocs = searcher.search(approximateQuery, size, sort); - TopDocs topDocs1 = searcher.search(query, size, sort); - - // Verify we got the highest values first (DESC order) - for (int i = 0; i < size; i++) { - assertEquals("Mismatch at doc index " + i, topDocs.scoreDocs[i].doc, topDocs1.scoreDocs[i].doc); - } - - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - } + int expectedSize = Math.max(from + size, trackTotalHitsUpTo) + 1; + assertEquals(expectedSize, query.getSize()); } // Test to cover the left child traversal in intersectRight with CELL_INSIDE_QUERY @@ -444,40 +650,43 @@ public void testIntersectRightLeftChildTraversal() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - long[] scratch = new long[dims]; - // Create a dataset that will create a multi-level BKD tree - // We need enough documents to create internal nodes (not just leaves) - int numPoints = 2000; + int numPoints = RandomNumbers.randomIntBetween(random(), 2000, 5000); for (int i = 0; i < numPoints; i++) { Document doc = new Document(); - scratch[0] = i; - doc.add(new LongPoint("point", scratch[0])); - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); - if (i % 100 == 0) { + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); + iw.addDocument(doc); + if (i % RandomNumbers.randomIntBetween(random(), 50, 200) == 0) { iw.flush(); } } iw.flush(); - iw.forceMerge(1); - + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - // Query that will match many documents and require tree traversal - long lower = 1000; - long upper = 1999; - final int size = 50; + // To test upper half of the data to test the DESC + long lower = RandomNumbers.randomLongBetween(random(), numPoints / 2, numPoints - 200); + long upper = RandomNumbers.randomLongBetween(random(), lower + 100, numPoints - 1); + final int size = RandomNumbers.randomIntBetween(random(), 30, 100); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), dims, - 50, // Small size to ensure we hit the left child traversal condition + size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); + IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG, true)); // DESC + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); TopDocs topDocs = searcher.search(query, size, sort); - assertEquals("Should return exactly size value documents", size, topDocs.scoreDocs.length); + assertEquals( + "Should return exactly min(size, hits) documents", + 
(int) Math.min(size, (upper - lower + 1)), + topDocs.scoreDocs.length + ); } } } @@ -488,38 +697,39 @@ public void testIntersectRightCellInsideQueryLeaf() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - long[] scratch = new long[dims]; - // Create a smaller dataset that will result in leaf nodes that are completely inside the query range - for (int i = 900; i <= 999; i++) { - scratch[0] = i; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); + int dataStart = RandomNumbers.randomIntBetween(random(), 5000, 15000); + int dataEnd = dataStart + RandomNumbers.randomIntBetween(random(), 1000, 2000); + int numDocs = dataEnd - dataStart + 1; + for (int i = dataStart; i <= dataEnd; i++) { + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); + iw.addDocument(doc); } iw.flush(); - iw.forceMerge(1); - + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - // Query that completely contains all documents (CELL_INSIDE_QUERY) - long lower = 800; - long upper = 1100; - final int size = 200; - final int returnSize = 100; + // Create a query range that fully encompasses the data + // This ensures CELL_INSIDE_QUERY condition + long lower = dataStart - RandomNumbers.randomIntBetween(random(), 50, 200); + long upper = dataEnd + RandomNumbers.randomIntBetween(random(), 50, 200); + final int size = RandomNumbers.randomIntBetween(random(), numDocs + 50, numDocs + 200); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), dims, - 200, + size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG, true)); // DESC + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); TopDocs topDocs = searcher.search(query, size, sort); - - assertEquals("Should find all documents", returnSize, topDocs.totalHits.value()); - // Should return all the indexed point values from 900 to 999 which tests CELL_INSIDE_QUERY - assertEquals("Should return exactly return size value documents", returnSize, topDocs.scoreDocs.length); + assertEquals("Should find all documents", numDocs, topDocs.totalHits.value()); + assertEquals("Should return exactly all documents", numDocs, topDocs.scoreDocs.length); } } } @@ -530,84 +740,90 @@ public void testIntersectRightCellOutsideQuery() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - long[] scratch = new long[dims]; - // Create documents in two separate ranges to ensure some cells are outside query - // Range 1: 0-99 - for (int i = 0; i < 100; i++) { - scratch[0] = i; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); + int range1Start = RandomNumbers.randomIntBetween(random(), 0, 1000); + int range1End = range1Start + RandomNumbers.randomIntBetween(random(), 1000, 15000); + for (int i = range1Start; i <= range1End; i++) { + Document doc = 
new Document(); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); + iw.addDocument(doc); } - // Range 2: 500-599 (gap ensures some tree nodes will be completely outside query) - for (int i = 500; i < 600; i++) { - scratch[0] = i; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); + int gapSize = RandomNumbers.randomIntBetween(random(), 200, 400); + int range2Start = range1End + gapSize; + int range2End = range2Start + RandomNumbers.randomIntBetween(random(), 50, 150); + for (int i = range2Start; i <= range2End; i++) { + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); + iw.addDocument(doc); } iw.flush(); - iw.forceMerge(1); - + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - // Query only the middle range - this should create CELL_OUTSIDE_QUERY for some nodes - long lower = 200; - long upper = 400; - final int size = 50; + long lower = range1End + RandomNumbers.randomIntBetween(random(), 10, gapSize / 2); + long upper = range2Start - RandomNumbers.randomIntBetween(random(), 10, gapSize / 2); + final int size = RandomNumbers.randomIntBetween(random(), 20, 100); + ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), dims, size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG, true)); // DESC + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); TopDocs topDocs = searcher.search(query, size, sort); - - // Should find no documents since our query range (200-400) has no documents assertEquals("Should find no documents in the gap range", 0, topDocs.totalHits.value()); } } } } - // Test to cover intersectRight with CELL_CROSSES_QUERY case and ensure comprehensive coverage for intersectRight + // Test to cover intersectRight with CELL_CROSSES_QUERY case public void testIntersectRightCellCrossesQuery() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - long[] scratch = new long[dims]; - // Create documents that will result in cells that cross the query boundary - for (int i = 0; i < 1000; i++) { - scratch[0] = i; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); + int numPoints = RandomNumbers.randomIntBetween(random(), 1000, 3000); + for (int i = 0; i < numPoints; i++) { + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, i); + numericType.addDocValuesField(doc, numericType.fieldName, i); + iw.addDocument(doc); + if (random().nextInt(100) == 0) { + iw.flush(); + } } iw.flush(); - iw.forceMerge(1); - + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { // Query that will partially overlap with tree nodes (CELL_CROSSES_QUERY) - // This range will intersect with some tree nodes but not completely contain them - long lower = 250; - long upper = 750; - final int size = 100; + long lower = 
RandomNumbers.randomLongBetween(random(), numPoints / 4, numPoints / 2); + long upper = RandomNumbers.randomLongBetween(random(), numPoints / 2, 3 * numPoints / 4); + final int size = RandomNumbers.randomIntBetween(random(), 50, 200); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), dims, - 100, + size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = new Sort(new SortField("point", SortField.Type.LONG, true)); // DESC + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); TopDocs topDocs = searcher.search(query, size, sort); - - assertEquals("Should return exactly size value documents", size, topDocs.scoreDocs.length); - // For Desc sort the ApproximatePointRangeQuery will slightly over collect to retain the highest matched docs - assertTrue("Should collect at least requested number of documents", topDocs.totalHits.value() >= 100); + long expectedHits = upper - lower + 1; + int expectedReturnSize = (int) Math.min(size, expectedHits); + assertEquals("Should return min(size, hits) documents", expectedReturnSize, topDocs.scoreDocs.length); + assertTrue("Should collect at least min(size, hits) documents", topDocs.totalHits.value() >= expectedReturnSize); } } } @@ -618,67 +834,72 @@ public void testIntersectRightSingleChildNode() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; - long[] scratch = new long[dims]; - - for (int i = 0; i < 100; i++) { - scratch[0] = 1000L; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); + int numSameValueDocs = RandomNumbers.randomIntBetween(random(), 1000, 3000); + long sameValue = RandomNumbers.randomLongBetween(random(), 500, 2000); + for (int i = 0; i < numSameValueDocs; i++) { + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, sameValue); + numericType.addDocValuesField(doc, numericType.fieldName, sameValue); + iw.addDocument(doc); } - scratch[0] = 987654321L; - iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); - + long highValue = RandomNumbers.randomLongBetween(random(), 900000000L, 999999999L); + Document doc = new Document(); + numericType.addField(doc, numericType.fieldName, highValue); + numericType.addDocValuesField(doc, numericType.fieldName, highValue); + iw.addDocument(doc); iw.flush(); - iw.forceMerge(1); - + if (random().nextBoolean()) { + iw.forceMerge(1); + } try (IndexReader reader = iw.getReader()) { - long lower = 500L; - long upper = 999999999L; - final int size = 50; + long lower = RandomNumbers.randomLongBetween(random(), 0, sameValue - 100); + long upper = RandomNumbers.randomLongBetween(random(), highValue, highValue + 1000000L); + final int size = RandomNumbers.randomIntBetween(random(), 20, 100); ApproximatePointRangeQuery query = new ApproximatePointRangeQuery( - "point", - pack(lower).bytes, - pack(upper).bytes, + numericType.fieldName, + numericType.encode(lower), + numericType.encode(upper), dims, size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); - IndexSearcher searcher = new IndexSearcher(reader); - Sort sort = 
new Sort(new SortField("point", SortField.Type.LONG, true)); + Sort sort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); TopDocs topDocs = searcher.search(query, size, sort); - - assertEquals("Should return exactly size value documents", size, topDocs.scoreDocs.length); + int totalDocs = numSameValueDocs + 1; + int expectedReturnSize = Math.min(size, totalDocs); + assertEquals("Should return min(size, totalDocs) documents", expectedReturnSize, topDocs.scoreDocs.length); } } } } - // Following test replicates the http_logs dataset public void testHttpLogTimestampDistribution() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; + String fieldName = numericType.fieldName; // Sparse range: 100-199 (100 docs, one per value) for (int i = 100; i < 200; i++) { Document doc = new Document(); - doc.add(new LongPoint("timestamp", i)); - doc.add(new NumericDocValuesField("timestamp", i)); + numericType.addField(doc, fieldName, i); + numericType.addDocValuesField(doc, fieldName, i); iw.addDocument(doc); } // Dense range: 1000-1999 (5000 docs, 5 per value) for (int i = 0; i < 5000; i++) { long value = 1000 + (i / 5); // Creates 5 docs per value from 1000-1999 Document doc = new Document(); - doc.add(new LongPoint("timestamp", value)); - doc.add(new NumericDocValuesField("timestamp", value)); + numericType.addField(doc, fieldName, value); + numericType.addDocValuesField(doc, fieldName, value); iw.addDocument(doc); } // 0-99 (100 docs) for (int i = 0; i < 100; i++) { Document doc = new Document(); - doc.add(new LongPoint("timestamp", i)); - doc.add(new NumericDocValuesField("timestamp", i)); + numericType.addField(doc, fieldName, i); + numericType.addDocValuesField(doc, fieldName, i); iw.addDocument(doc); } iw.flush(); @@ -686,11 +907,11 @@ public void testHttpLogTimestampDistribution() throws IOException { try (IndexReader reader = iw.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); // Test sparse region - testApproximateVsExactQuery(searcher, "timestamp", 100, 199, 50, dims); + testApproximateVsExactQuery(searcher, fieldName, 100, 199, 50, dims); // Test dense region - testApproximateVsExactQuery(searcher, "timestamp", 1000, 1500, 100, dims); + testApproximateVsExactQuery(searcher, fieldName, 1000, 1500, 100, dims); // Test across regions - testApproximateVsExactQuery(searcher, "timestamp", 0, 2000, 200, dims); + testApproximateVsExactQuery(searcher, fieldName, 0, 2000, 200, dims); } } } @@ -701,30 +922,41 @@ public void testNycTaxiDataDistribution() throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, new WhitespaceAnalyzer())) { int dims = 1; + String fieldName = numericType.fieldName; // Create NYC taxi fare distribution with different ranges + // Low fares: 250-500 (sparse) for (long fare = 250; fare <= 500; fare++) { - iw.addDocument(asList(new LongPoint("fare_amount", fare), new NumericDocValuesField("fare_amount", fare))); + Document doc = new Document(); + numericType.addField(doc, fieldName, fare); + numericType.addDocValuesField(doc, fieldName, fare); + iw.addDocument(doc); } // Typical fares: 1000-3000 (dense, 5 docs per value) for (long fare = 1000; fare <= 3000; fare++) { for (int dup = 0; dup < 5; dup++) { - iw.addDocument(asList(new LongPoint("fare_amount", fare), new NumericDocValuesField("fare_amount", fare))); 
+ Document doc = new Document(); + numericType.addField(doc, fieldName, fare); + numericType.addDocValuesField(doc, fieldName, fare); + iw.addDocument(doc); } } // High fares: 10000-20000 (sparse, 1 doc every 100) for (long fare = 10000; fare <= 20000; fare += 100) { - iw.addDocument(asList(new LongPoint("fare_amount", fare), new NumericDocValuesField("fare_amount", fare))); + Document doc = new Document(); + numericType.addField(doc, fieldName, fare); + numericType.addDocValuesField(doc, fieldName, fare); + iw.addDocument(doc); } iw.flush(); iw.forceMerge(1); try (IndexReader reader = iw.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); // Test 1: Query for typical fare range - testApproximateVsExactQuery(searcher, "fare_amount", 1000, 3000, 100, dims); + testApproximateVsExactQuery(searcher, fieldName, 1000, 3000, 100, dims); // Test 2: Query for high fare range - testApproximateVsExactQuery(searcher, "fare_amount", 10000, 20000, 50, dims); + testApproximateVsExactQuery(searcher, fieldName, 10000, 20000, 50, dims); // Test 3: Query for low fares - testApproximateVsExactQuery(searcher, "fare_amount", 250, 500, 50, dims); + testApproximateVsExactQuery(searcher, fieldName, 250, 500, 50, dims); } } } @@ -735,15 +967,15 @@ private void testApproximateVsExactQuery(IndexSearcher searcher, String field, l // Test with approximate query ApproximatePointRangeQuery approxQuery = new ApproximatePointRangeQuery( field, - pack(lower).bytes, - pack(upper).bytes, + numericType.encode(lower), + numericType.encode(upper), dims, size, null, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); // Test with exact query - Query exactQuery = LongPoint.newRangeQuery(field, lower, upper); + Query exactQuery = numericType.rangeQuery(field, lower, upper); TopDocs approxDocs = searcher.search(approxQuery, size); TopDocs exactDocs = searcher.search(exactQuery, size); // Verify approximate query returns correct number of results @@ -757,24 +989,26 @@ private void testApproximateVsExactQuery(IndexSearcher searcher, String field, l ); } // Test with sorting (ASC and DESC) - Sort ascSort = new Sort(new SortField(field, SortField.Type.LONG)); - Sort descSort = new Sort(new SortField(field, SortField.Type.LONG, true)); + Sort ascSort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType())); + Sort descSort = new Sort(new SortField(numericType.getSortFieldName(), numericType.getSortFieldType(), true)); // Test ASC sort ApproximatePointRangeQuery approxQueryAsc = new ApproximatePointRangeQuery( field, - pack(lower).bytes, - pack(upper).bytes, + numericType.encode(lower), + numericType.encode(upper), dims, size, SortOrder.ASC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); TopDocs approxDocsAsc = searcher.search(approxQueryAsc, size, ascSort); TopDocs exactDocsAsc = searcher.search(exactQuery, size, ascSort); // Verify results match - for (int i = 0; i < size; i++) { + int compareSize = Math.min(size, Math.min(approxDocsAsc.scoreDocs.length, exactDocsAsc.scoreDocs.length)); + for (int i = 0; i < compareSize; i++) { assertEquals("ASC sorted results should match at position " + i, exactDocsAsc.scoreDocs[i].doc, approxDocsAsc.scoreDocs[i].doc); } + assertEquals("Should return same number of documents", exactDocsAsc.scoreDocs.length, approxDocsAsc.scoreDocs.length); assertEquals("Should return exactly size value documents", size, approxDocsAsc.scoreDocs.length); assertEquals( "Should return exactly size value documents as regular query", @@ -784,23 
+1018,25 @@ private void testApproximateVsExactQuery(IndexSearcher searcher, String field, l // Test DESC sort ApproximatePointRangeQuery approxQueryDesc = new ApproximatePointRangeQuery( field, - pack(lower).bytes, - pack(upper).bytes, + numericType.encode(lower), + numericType.encode(upper), dims, size, SortOrder.DESC, - ApproximatePointRangeQuery.LONG_FORMAT + numericType.format ); TopDocs approxDocsDesc = searcher.search(approxQueryDesc, size, descSort); TopDocs exactDocsDesc = searcher.search(exactQuery, size, descSort); // Verify the results match - for (int i = 0; i < size; i++) { + compareSize = Math.min(size, Math.min(approxDocsDesc.scoreDocs.length, exactDocsDesc.scoreDocs.length)); + for (int i = 0; i < compareSize; i++) { assertEquals( "DESC sorted results should match at position " + i, exactDocsDesc.scoreDocs[i].doc, approxDocsDesc.scoreDocs[i].doc ); } + assertEquals("Should return same number of documents", exactDocsDesc.scoreDocs.length, approxDocsDesc.scoreDocs.length); assertEquals("Should return exactly size value documents", size, approxDocsAsc.scoreDocs.length); assertEquals( "Should return exactly size value documents as regular query", diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index 8cb22201da1b6..8e604824b73a6 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -56,6 +56,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BooleanSupplier; import java.util.function.LongSupplier; import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; @@ -74,6 +75,7 @@ import static org.mockito.Mockito.when; public class SearchBackpressureServiceTests extends OpenSearchTestCase { + final BooleanSupplier resourceCacheExpiryChecker = () -> true; MockTransportService transportService; TaskManager taskManager; ThreadPool threadPool; @@ -101,8 +103,8 @@ public void testIsNodeInDuress() { AtomicReference cpuUsage = new AtomicReference<>(); AtomicReference heapUsage = new AtomicReference<>(); - NodeDuressTracker cpuUsageTracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5, () -> 3); - NodeDuressTracker heapUsageTracker = new NodeDuressTracker(() -> heapUsage.get() >= 0.5, () -> 3); + NodeDuressTracker cpuUsageTracker = new NodeDuressTracker(() -> cpuUsage.get() >= 0.5, () -> 5); + NodeDuressTracker heapUsageTracker = new NodeDuressTracker(() -> heapUsage.get() >= 0.5, () -> 6); EnumMap duressTrackers = new EnumMap<>(ResourceType.class) { { @@ -121,7 +123,7 @@ public void testIsNodeInDuress() { mockTaskResourceTrackingService, threadPool, System::nanoTime, - new NodeDuressTrackers(duressTrackers), + new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker), new TaskResourceUsageTrackers(), new TaskResourceUsageTrackers(), taskManager, @@ -167,7 +169,7 @@ public void testTrackerStateUpdateOnSearchTaskCompletion() { mockTaskResourceTrackingService, threadPool, mockTimeNanosSupplier, - new NodeDuressTrackers(new EnumMap<>(ResourceType.class)), + new NodeDuressTrackers(new EnumMap<>(ResourceType.class), resourceCacheExpiryChecker), taskResourceUsageTrackers, new TaskResourceUsageTrackers(), taskManager, 
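For reference, a minimal sketch of the new two-argument NodeDuressTrackers wiring that the surrounding hunks keep repeating (SearchBackpressureServiceTests above, NodeDuressTrackersTests below). It assumes the snippet sits alongside these tests in the same package, so no tracker imports are needed; the EnumMap type parameters and the meaning of the second constructor argument are inferred from the test code, not taken from the production sources.

    import java.util.EnumMap;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.BooleanSupplier;
    import org.opensearch.wlm.ResourceType;

    class NodeDuressWiringSketch {
        static NodeDuressTrackers buildTrackers(AtomicReference<Double> cpuUsage, AtomicReference<Double> heapUsage) {
            // The tests pass () -> true, presumably meaning "cached duress values have expired, recompute them".
            BooleanSupplier resourceCacheExpiryChecker = () -> true;
            EnumMap<ResourceType, NodeDuressTracker> duressTrackers = new EnumMap<>(ResourceType.class);
            // Same duress-signal and threshold suppliers used in testIsNodeInDuress above.
            duressTrackers.put(ResourceType.CPU, new NodeDuressTracker(() -> cpuUsage.get() >= 0.5, () -> 5));
            duressTrackers.put(ResourceType.MEMORY, new NodeDuressTracker(() -> heapUsage.get() >= 0.5, () -> 6));
            // NodeDuressTrackers now takes the expiry check in addition to the tracker map.
            return new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker);
        }
    }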
@@ -201,7 +203,7 @@ public void testTrackerStateUpdateOnSearchShardTaskCompletion() { mockTaskResourceTrackingService, threadPool, mockTimeNanosSupplier, - new NodeDuressTrackers(new EnumMap<>(ResourceType.class)), + new NodeDuressTrackers(new EnumMap<>(ResourceType.class), resourceCacheExpiryChecker), new TaskResourceUsageTrackers(), taskResourceUsageTrackers, taskManager, @@ -256,7 +258,7 @@ public void testSearchTaskInFlightCancellation() { mockTaskResourceTrackingService, threadPool, mockTimeNanosSupplier, - new NodeDuressTrackers(duressTrackers), + new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker), taskResourceUsageTrackers, new TaskResourceUsageTrackers(), mockTaskManager, @@ -329,7 +331,7 @@ public void testSearchShardTaskInFlightCancellation() { put(CPU, mockNodeDuressTracker); } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker); TaskResourceUsageTracker mockTaskResourceUsageTracker = getMockedTaskResourceUsageTracker( TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, @@ -427,7 +429,7 @@ public void testNonCancellationOfHeapBasedTasksWhenHeapNotInDuress() { } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker); // Creating heap and cpu usage trackers where heap tracker will always evaluate with reasons to cancel the // tasks but heap based cancellation should not happen because heap is not in duress @@ -525,7 +527,7 @@ public void testNonCancellationWhenSearchTrafficIsNotQualifyingForCancellation() } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(duressTrackers, resourceCacheExpiryChecker); // Creating heap and cpu usage trackers where heap tracker will always evaluate with reasons to cancel the // tasks but heap based cancellation should not happen because heap is not in duress diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java index 7c52840c099d4..f46d84e1034a2 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/NodeDuressTrackersTests.java @@ -13,9 +13,12 @@ import org.opensearch.wlm.ResourceType; import java.util.EnumMap; +import java.util.function.BooleanSupplier; public class NodeDuressTrackersTests extends OpenSearchTestCase { + final BooleanSupplier resourceCacheExpiryChecker = () -> true; + public void testNodeNotInDuress() { EnumMap map = new EnumMap<>(ResourceType.class) { { @@ -24,7 +27,7 @@ public void testNodeNotInDuress() { } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map, resourceCacheExpiryChecker); assertFalse(nodeDuressTrackers.isNodeInDuress()); assertFalse(nodeDuressTrackers.isNodeInDuress()); @@ -34,12 +37,12 @@ public void testNodeNotInDuress() { public void testNodeInDuressWhenHeapInDuress() { EnumMap map = new EnumMap<>(ResourceType.class) { { - put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 3)); + put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 6)); 
put(ResourceType.CPU, new NodeDuressTracker(() -> false, () -> 1)); } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map, resourceCacheExpiryChecker); assertFalse(nodeDuressTrackers.isNodeInDuress()); assertFalse(nodeDuressTrackers.isNodeInDuress()); @@ -52,11 +55,11 @@ public void testNodeInDuressWhenCPUInDuress() { EnumMap map = new EnumMap<>(ResourceType.class) { { put(ResourceType.MEMORY, new NodeDuressTracker(() -> false, () -> 1)); - put(ResourceType.CPU, new NodeDuressTracker(() -> true, () -> 3)); + put(ResourceType.CPU, new NodeDuressTracker(() -> true, () -> 5)); } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map, resourceCacheExpiryChecker); assertFalse(nodeDuressTrackers.isNodeInDuress()); assertFalse(nodeDuressTrackers.isNodeInDuress()); @@ -68,12 +71,12 @@ public void testNodeInDuressWhenCPUInDuress() { public void testNodeInDuressWhenCPUAndHeapInDuress() { EnumMap map = new EnumMap<>(ResourceType.class) { { - put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 3)); - put(ResourceType.CPU, new NodeDuressTracker(() -> false, () -> 3)); + put(ResourceType.MEMORY, new NodeDuressTracker(() -> true, () -> 6)); + put(ResourceType.CPU, new NodeDuressTracker(() -> true, () -> 5)); } }; - NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map); + NodeDuressTrackers nodeDuressTrackers = new NodeDuressTrackers(map, resourceCacheExpiryChecker); assertFalse(nodeDuressTrackers.isNodeInDuress()); assertFalse(nodeDuressTrackers.isNodeInDuress()); diff --git a/server/src/test/java/org/opensearch/search/profile/AbstractProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/AbstractProfileBreakdownTests.java new file mode 100644 index 0000000000000..bfc2092ee7b0f --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/AbstractProfileBreakdownTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +public class AbstractProfileBreakdownTests extends OpenSearchTestCase { + private enum TestType { + STAT_1, + STAT_2; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private static class TestProfileBreakdown extends AbstractProfileBreakdown { + Map stats; + + TestProfileBreakdown() { + super(List.of()); + stats = new HashMap(); + long counter = 123; + for (TestType type : TestType.values()) { + stats.put(type.toString(), counter++); + } + } + + @Override + public Map toBreakdownMap() { + return Collections.unmodifiableMap(stats); + } + + @Override + public Map toDebugMap() { + return Map.of("test_debug", 1234L); + } + } + + public void testToBreakdownMap() { + AbstractProfileBreakdown breakdown = new TestProfileBreakdown(); + Map stats = new HashMap<>(); + stats.put("stat_1", 123L); + stats.put("stat_2", 124L); + assertEquals(stats, breakdown.toBreakdownMap()); + assertEquals(Collections.singletonMap("test_debug", 1234L), breakdown.toDebugMap()); + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/ProfileMetricTests.java b/server/src/test/java/org/opensearch/search/profile/ProfileMetricTests.java new file mode 100644 index 0000000000000..453573b97c96a --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/ProfileMetricTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +public class ProfileMetricTests extends OpenSearchTestCase { + + private static class TestMetric extends ProfileMetric { + + private long value = 0L; + + public TestMetric(String name) { + super(name); + } + + public void setValue(long value) { + this.value = value; + } + + @Override + public Map toBreakdownMap() { + return Map.of("test_metric", value); + } + } + + public void testNonTimingMetric() { + TestMetric test_metric = new TestMetric("test_metric"); + test_metric.setValue(1234L); + assertEquals(test_metric.getName(), "test_metric"); + Map map = test_metric.toBreakdownMap(); + assertEquals(map.get("test_metric").longValue(), 1234L); + } + + public static Collection> getNonTimingMetric() { + return List.of(() -> new TestMetric("test_metric")); + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/TimerTests.java b/server/src/test/java/org/opensearch/search/profile/TimerTests.java index 5997292eb8f56..8634522dedcda 100644 --- a/server/src/test/java/org/opensearch/search/profile/TimerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/TimerTests.java @@ -34,13 +34,14 @@ import org.opensearch.test.OpenSearchTestCase; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; public class TimerTests extends OpenSearchTestCase { public void testTimingInterval() { final AtomicLong nanoTimeCallCounter = new AtomicLong(); - Timer t = new Timer() { + Timer t = new Timer("test") { long time = 50; @Override @@ -63,7 +64,7 @@ long nanoTime() { } public void testExtrapolate() { - Timer t = new Timer() { + Timer t = new Timer("test") { long time = 50; @Override @@ -84,4 +85,16 @@ long nanoTime() { } } + public void testTimerBreakdownMap() { + Timer t = new Timer(123L, 2L, 1234L, 0L, 12345L, "test"); + Map map = t.toBreakdownMap(); + assertEquals(map.size(), 3); + assertEquals(map.get("test").longValue(), 123L); + assertEquals(map.get("test_count").longValue(), 2L); + assertEquals(map.get("test_start_time").longValue(), 12345L); + + Timer t1 = new Timer(123L, 2L, 1234L, 0L, 12345L, "test1"); + assertEquals(t1.getName(), "test1"); + } + } diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java index db14eb90ef839..d3bea3bd4c5ce 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -25,16 +25,22 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Collector; import org.apache.lucene.store.Directory; -import org.opensearch.search.profile.AbstractProfileBreakdown; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileMetric; +import org.opensearch.search.profile.ProfileMetricTests; +import org.opensearch.search.profile.ProfileMetricUtil; import org.opensearch.search.profile.Timer; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.function.Supplier; -import static 
org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_COUNT_SUFFIX; -import static org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX; +import static org.opensearch.search.profile.Timer.TIMING_TYPE_COUNT_SUFFIX; +import static org.opensearch.search.profile.Timer.TIMING_TYPE_START_TIME_SUFFIX; import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.MIN_PREFIX; import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_END_TIME_SUFFIX; import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_START_TIME_SUFFIX; @@ -42,11 +48,15 @@ public class ConcurrentQueryProfileBreakdownTests extends OpenSearchTestCase { private ConcurrentQueryProfileBreakdown testQueryProfileBreakdown; + private ConcurrentQueryProfileBreakdown testQueryProfileBreakdownCombined; private Timer createWeightTimer; @Before public void setup() { - testQueryProfileBreakdown = new ConcurrentQueryProfileBreakdown(); + testQueryProfileBreakdown = new ConcurrentQueryProfileBreakdown(ProfileMetricUtil.getDefaultQueryProfileMetrics()); + Collection> combinedMetrics = ProfileMetricUtil.getDefaultQueryProfileMetrics(); + combinedMetrics.addAll(ProfileMetricTests.getNonTimingMetric()); + testQueryProfileBreakdownCombined = new ConcurrentQueryProfileBreakdown(combinedMetrics); createWeightTimer = testQueryProfileBreakdown.getTimer(QueryTimingType.CREATE_WEIGHT); try { createWeightTimer.start(); @@ -105,10 +115,7 @@ public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); - final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap - ); + final ContextualProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown(leafProfileBreakdownMap); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); @@ -156,14 +163,8 @@ public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); - final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_1 - ); - final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_2 - ); + final ContextualProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_1); + final ContextualProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_2); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); 
testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); @@ -222,14 +223,8 @@ public void testBreakDownMapWithMultipleSlices() throws Exception { final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1); - final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_1 - ); - final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_2 - ); + final ContextualProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_1); + final ContextualProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_2); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); @@ -283,10 +278,7 @@ public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() thr final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); - final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_1 - ); + final ContextualProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_1); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); @@ -339,10 +331,7 @@ public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exceptio final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); - final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( - QueryTimingType.class, - leafProfileBreakdownMap_1 - ); + final ContextualProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown(leafProfileBreakdownMap_1); testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); final Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); @@ -385,6 +374,54 @@ public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exceptio directory.close(); } + public void testBuildSliceLevelBreakdownWithSingleSliceCombinedMetricOnly() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final 
LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); + final Collector sliceCollector = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap = getCombinedLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final ContextualProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown(leafProfileBreakdownMap); + testQueryProfileBreakdownCombined.associateCollectorToLeaves(sliceCollector, sliceLeaf); + testQueryProfileBreakdownCombined.getContexts().put(sliceLeaf, leafProfileBreakdown); + final Map> sliceBreakdownMap = testQueryProfileBreakdownCombined.buildSliceLevelBreakdown(); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(1, sliceBreakdownMap.size()); + assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); + + final Map sliceBreakdown = sliceBreakdownMap.entrySet().iterator().next().getValue(); + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no entry for create weight at slice level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for other timing type we will have all the value and will be same as leaf breakdown as there is single slice and single leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + assertEquals(leafProfileBreakdownMap.get("value"), sliceBreakdown.get("value")); + assertEquals(10, testQueryProfileBreakdownCombined.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdownCombined.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdownCombined.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + private Map getLeafBreakdownMap(long startTime, long timeTaken, long count) { Map leafBreakDownMap = new HashMap<>(); for (QueryTimingType timingType : QueryTimingType.values()) { @@ -400,6 +437,22 @@ private Map getLeafBreakdownMap(long startTime, long timeTaken, lo return leafBreakDownMap; } + private Map getCombinedLeafBreakdownMap(long startTime, long timeTaken, long count) { + Map leafBreakDownMap = new HashMap<>(); + for (QueryTimingType timingType : QueryTimingType.values()) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // don't add anything + continue; + } + String timingTypeKey = timingType.toString(); + leafBreakDownMap.put(timingTypeKey, timeTaken); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_COUNT_SUFFIX, count); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX, startTime); + } + leafBreakDownMap.put("test_metric", 123L); + return leafBreakDownMap; + } + private DirectoryReader 
getDirectoryReader(int numLeaves) throws Exception { final Directory directory = newDirectory(); IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); @@ -416,11 +469,11 @@ private DirectoryReader getDirectoryReader(int numLeaves) throws Exception { return DirectoryReader.open(directory); } - private static class TestQueryProfileBreakdown extends AbstractProfileBreakdown { + private static class TestQueryProfileBreakdown extends ContextualProfileBreakdown { private Map breakdownMap; - public TestQueryProfileBreakdown(Class clazz, Map breakdownMap) { - super(clazz); + public TestQueryProfileBreakdown(Map breakdownMap) { + super(List.of()); this.breakdownMap = breakdownMap; } @@ -428,5 +481,10 @@ public TestQueryProfileBreakdown(Class clazz, Map public Map toBreakdownMap() { return breakdownMap; } + + @Override + public ContextualProfileBreakdown context(Object context) { + return null; + } } } diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java index 736bbcdd9e8dd..2a40c7608feaa 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java @@ -21,9 +21,9 @@ public class ConcurrentQueryProfilerTests extends OpenSearchTestCase { public void testMergeRewriteTimeIntervals() { ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()); List timers = new LinkedList<>(); - timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L)); - timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L)); - timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L)); + timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L, "test1")); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L, "test2")); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L, "test3")); LinkedList mergedIntervals = profiler.mergeRewriteTimeIntervals(timers); assertThat(mergedIntervals.size(), equalTo(2)); long[] interval = mergedIntervals.get(0); diff --git a/server/src/test/java/org/opensearch/search/profile/query/ProfileScorerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ProfileScorerTests.java index a4ff92ec556e9..0112fa0839965 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/ProfileScorerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/ProfileScorerTests.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; +import org.opensearch.search.profile.ProfileMetricUtil; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -84,7 +85,7 @@ public void testPropagateMinCompetitiveScore() throws IOException { Query query = new MatchAllDocsQuery(); Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f); FakeScorer fakeScorer = new FakeScorer(weight); - QueryProfileBreakdown profile = new QueryProfileBreakdown(); + QueryProfileBreakdown profile = new QueryProfileBreakdown(ProfileMetricUtil.getDefaultQueryProfileMetrics()); ProfileScorer profileScorer = new ProfileScorer(fakeScorer, profile); profileScorer.setMinCompetitiveScore(0.42f); assertEquals(0.42f, fakeScorer.minCompetitiveScore, 0f); @@ -94,7 
+95,7 @@ public void testPropagateMaxScore() throws IOException { Query query = new MatchAllDocsQuery(); Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f); FakeScorer fakeScorer = new FakeScorer(weight); - QueryProfileBreakdown profile = new QueryProfileBreakdown(); + QueryProfileBreakdown profile = new QueryProfileBreakdown(ProfileMetricUtil.getDefaultQueryProfileMetrics()); ProfileScorer profileScorer = new ProfileScorer(fakeScorer, profile); profileScorer.setMinCompetitiveScore(0.42f); fakeScorer.maxScore = 42f; diff --git a/server/src/test/java/org/opensearch/search/query/QueryCollectorArgumentsTests.java b/server/src/test/java/org/opensearch/search/query/QueryCollectorArgumentsTests.java new file mode 100644 index 0000000000000..a853edf5c8be7 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/QueryCollectorArgumentsTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.opensearch.test.OpenSearchTestCase; + +public class QueryCollectorArgumentsTests extends OpenSearchTestCase { + + public void testBuilder() { + QueryCollectorArguments args = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + assertTrue(args.hasFilterCollector()); + } + + public void testEquals() { + QueryCollectorArguments args1 = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + QueryCollectorArguments args2 = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + QueryCollectorArguments args3 = new QueryCollectorArguments.Builder().hasFilterCollector(false).build(); + + assertTrue(args1.equals(args2)); // Same values + assertFalse(args1.equals(args3)); // Different values + assertTrue(args1.equals(args1)); // Same object + } + + public void testHashCode() { + QueryCollectorArguments args1 = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + QueryCollectorArguments args2 = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + assertEquals(args1.hashCode(), args2.hashCode()); + assertEquals(args1.hashCode(), args1.hashCode()); // Consistent + } + + public void testToString() { + QueryCollectorArguments args = new QueryCollectorArguments.Builder().hasFilterCollector(true).build(); + + String result = args.toString(); + + assertEquals("QueryCollectorArguments[hasFilterCollector=true]", result); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java b/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java new file mode 100644 index 0000000000000..03fedc3534e82 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/QueryCollectorContextSpecRegistryTests.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.query; + +import org.opensearch.search.internal.SearchContext; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Optional; + +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.Mockito.when; + +public class QueryCollectorContextSpecRegistryTests extends OpenSearchTestCase { + @Mock + private QueryCollectorContextSpecFactory mockFactory1; + + @Mock + private SearchContext mockSearchContext; + + @Mock + private QueryCollectorContextSpec mockSpec; + + @Before + public void setUp() throws Exception { + super.setUp(); + MockitoAnnotations.initMocks(this); + // Clear registry before each test + QueryCollectorContextSpecRegistry.getCollectorContextSpecFactories().clear(); + } + + public void testRegisterFactory() { + QueryCollectorContextSpecRegistry.registerFactory(mockFactory1); + assertEquals(1, QueryCollectorContextSpecRegistry.getCollectorContextSpecFactories().size()); + assertTrue(QueryCollectorContextSpecRegistry.getCollectorContextSpecFactories().contains(mockFactory1)); + } + + public void testGetQueryCollectorContextSpec_WithValidSpec() throws IOException { + + QueryCollectorArguments mockArguments = new QueryCollectorArguments.Builder().build(); + // Given + QueryCollectorContextSpecRegistry.registerFactory(mockFactory1); + when(mockFactory1.createQueryCollectorContextSpec(mockSearchContext, mockArguments)).thenReturn(Optional.of(mockSpec)); + + // When + Optional result = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( + mockSearchContext, + mockArguments + ); + + // Then + assertTrue(result.isPresent()); + assertEquals(mockSpec, result.get()); + } + + public void testGetQueryCollectorContextSpec_NoFactories() throws IOException { + + QueryCollectorArguments mockArguments = new QueryCollectorArguments.Builder().build(); + + // When + Optional result = QueryCollectorContextSpecRegistry.getQueryCollectorContextSpec( + mockSearchContext, + mockArguments + ); + + // Then + assertTrue(result.isEmpty()); + } + +} diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 73333ea2c5706..ae32ebd0a6f7a 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -994,6 +994,49 @@ public void testMinScore() throws Exception { dir.close(); } + public void testMaxScoreWithSortOnScoreFirstly() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); + + final int numDocs = scaledRandomIntBetween(10, 20); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + doc.add(new StringField("filter", "f1" + ((i > 0) ? 
" " + i : ""), Store.NO)); + w.addDocument(doc); + } + w.close(); + + IndexReader reader = DirectoryReader.open(dir); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)); + context.trackScores(false); + Sort sort = new Sort(new SortField(null, SortField.Type.SCORE), new SortField(null, SortField.Type.DOC)); + SortAndFormats sortAndFormats = new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }); + context.sort(sortAndFormats); + context.parsedQuery( + new ParsedQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST) + .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD) + .build() + ) + ); + context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); + context.setSize(1); + context.trackTotalHitsUpTo(5); + + QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); + assertFalse(Float.isNaN(context.queryResult().getMaxScore())); + assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length); + assertThat( + ((FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]).fields[0], + equalTo(context.queryResult().getMaxScore()) + ); + + reader.close(); + dir.close(); + } + public void testMaxScore() throws Exception { Directory dir = newDirectory(); final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING)); diff --git a/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java index a71c18aa2266b..7324bac55c21f 100644 --- a/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java @@ -190,7 +190,7 @@ public MappedFieldType fieldMapper(String name) { : rescoreBuilder.windowSize().intValue(); assertEquals(expectedWindowSize, rescoreContext.getWindowSize()); Query expectedQuery = Rewriteable.rewrite(rescoreBuilder.getRescoreQuery(), mockShardContext).toQuery(mockShardContext); - assertEquals(expectedQuery, rescoreContext.query()); + assertEquals(expectedQuery, rescoreContext.parsedQuery().query()); assertEquals(rescoreBuilder.getQueryWeight(), rescoreContext.queryWeight(), Float.MIN_VALUE); assertEquals(rescoreBuilder.getRescoreQueryWeight(), rescoreContext.rescoreQueryWeight(), Float.MIN_VALUE); assertEquals(rescoreBuilder.getScoreMode(), rescoreContext.scoreMode()); @@ -202,6 +202,50 @@ public void testRescoreQueryNull() throws IOException { assertEquals("rescore_query cannot be null", e.getMessage()); } + /** + * Test that named queries from rescore contexts are captured + */ + public void testRescoreNamedQueries() throws IOException { + final long nowInMillis = randomNonNegativeLong(); + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAlphaOfLengthBetween(1, 10), indexSettings); + + QueryShardContext mockShardContext = new QueryShardContext( + 0, + idxSettings, + BigArrays.NON_RECYCLING_INSTANCE, + null, + null, + null, + null, + null, + xContentRegistry(), + namedWriteableRegistry, + null, + null, + () -> nowInMillis, + null, + null, + () -> true, + null + ) { + @Override + public MappedFieldType fieldMapper(String name) { + TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + 
return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType(); + } + }; + + QueryBuilder namedQueryBuilder = new MatchAllQueryBuilder().queryName("test_rescore_query"); + QueryRescorerBuilder rescoreBuilder = new QueryRescorerBuilder(namedQueryBuilder); + QueryRescoreContext rescoreContext = (QueryRescoreContext) rescoreBuilder.buildContext(mockShardContext); + assertNotNull(rescoreContext.parsedQuery()); + assertNotNull(rescoreContext.parsedQuery().namedFilters()); + assertEquals(1, rescoreContext.parsedQuery().namedFilters().size()); + assertTrue(rescoreContext.parsedQuery().namedFilters().containsKey("test_rescore_query")); + assertNotNull(rescoreContext.parsedQuery().namedFilters().get("test_rescore_query")); + } + class AlwaysRewriteQueryBuilder extends MatchAllQueryBuilder { @Override diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java index 4784fe6cedf5a..989d390853cc2 100644 --- a/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/WorkloadGroupServiceTests.java @@ -477,7 +477,7 @@ public void testShouldSBPHandle() { task.setWorkloadGroupId(mockThreadPool.getThreadContext()); assertTrue(workloadGroupService.shouldSBPHandle(task)); - // Valid query group task but wlm not enabled + // Valid workload group task but wlm not enabled when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.DISABLED); activeWorkloadGroups.add( new WorkloadGroup( diff --git a/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java b/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java index 123352c6f67a5..ed8f296cd39f0 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java +++ b/server/src/test/java/org/opensearch/wlm/tracker/WorkloadGroupTaskResourceTrackingTests.java @@ -50,7 +50,7 @@ public void testValidWorkloadGroupTasksCase() { WorkloadGroupTask task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); taskResourceTrackingService.startTracking(task); - // since the query group id is not set we should not track this task + // since the workload group id is not set we should not track this task Map resourceUsageViewMap = workloadGroupResourceUsageTrackerService .constructWorkloadGroupLevelUsageViews(); assertTrue(resourceUsageViewMap.isEmpty()); diff --git a/settings.gradle b/settings.gradle index 1fb5795b1a8ba..a1cd3c76e5c22 100644 --- a/settings.gradle +++ b/settings.gradle @@ -77,7 +77,6 @@ List projects = [ 'distribution:tools:launchers', 'distribution:tools:plugin-cli', 'distribution:tools:keystore-cli', - 'distribution:tools:upgrade-cli', 'server', 'server:cli', 'test:framework', diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 396cd6feceb90..9bbb8f86856ce 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -88,6 +88,6 @@ dependencies { runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.13.0" + runtimeOnly "com.squareup.okio:okio:3.15.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.7" } diff --git a/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 b/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 deleted file mode 100644 index 
5df930b54fe44..0000000000000 --- a/test/framework/licenses/bcpkix-fips-2.0.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01eea0f325315ca6295b0a6926ff862d8001cdf9 \ No newline at end of file diff --git a/test/framework/licenses/bcpkix-fips-2.0.8.jar.sha1 b/test/framework/licenses/bcpkix-fips-2.0.8.jar.sha1 new file mode 100644 index 0000000000000..69293a600d472 --- /dev/null +++ b/test/framework/licenses/bcpkix-fips-2.0.8.jar.sha1 @@ -0,0 +1 @@ +aad7b0fcf55892e7ff7e2d23a290f143f4bb56e0 \ No newline at end of file diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index d9348aacd7a11..fc92065391fd4 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -120,6 +120,7 @@ import org.opensearch.index.mapper.ObjectMapper.Nested; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; +import org.opensearch.index.mapper.SemanticVersionFieldMapper; import org.opensearch.index.mapper.StarTreeMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.query.QueryBuilder; @@ -213,6 +214,7 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase { denylist.add(FieldAliasMapper.CONTENT_TYPE); // TODO support alias denylist.add(DerivedFieldMapper.CONTENT_TYPE); // TODO support derived fields denylist.add(StarTreeMapper.CONTENT_TYPE); // TODO evaluate support for star tree fields + denylist.add(SemanticVersionFieldMapper.CONTENT_TYPE); // TODO support for semantic version fields TYPE_TEST_DENYLIST = denylist; } diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 3c351ce0ad2f9..f0f2d452faf8d 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -40,7 +40,6 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.Constants; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListenerResponseHandler; @@ -2199,7 +2198,7 @@ public void testTimeoutPerConnection() throws IOException { public void testHandshakeWithIncompatVersion() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - Version version = LegacyESVersion.fromString("6.0.0"); + Version version = Version.fromString("6.0.0"); try (MockTransportService service = buildService("TS_C", version, Settings.EMPTY)) { service.start(); service.acceptIncomingRequests(); @@ -2225,14 +2224,7 @@ public void testHandshakeUpdatesVersion() throws IOException { service.start(); service.acceptIncomingRequests(); TransportAddress address = service.boundAddress().publishAddress(); - DiscoveryNode node = new DiscoveryNode( - "TS_TPC", - "TS_TPC", - address, - emptyMap(), - emptySet(), - LegacyESVersion.fromString("2.0.0") - ); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), Version.fromString("2.0.0")); ConnectionProfile.Builder builder = new 
ConnectionProfile.Builder(); builder.addConnections( 1, diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index ee355e307345c..331414a2192fd 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -32,7 +32,7 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.XContentParser; @@ -130,8 +130,8 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(LegacyESVersion.fromString("6.0.0"))); - assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(LegacyESVersion.fromString("6.2.0"))); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.2.0"))); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetupSectionTests.java index 159f5052e7148..7f55df08e4af9 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetupSectionTests.java @@ -31,7 +31,7 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; @@ -135,8 +135,8 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(LegacyESVersion.fromString("6.0.0"))); - assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(LegacyESVersion.fromString("6.3.0"))); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java 
b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java index 9974917d475e8..f3467e8eb532c 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java @@ -32,7 +32,6 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.ParsingException; @@ -72,7 +71,7 @@ public void testSkip() { "foobar" ); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(LegacyESVersion.fromString("6.0.0"))); + assertTrue(section.skip(Version.fromString("6.0.0"))); section = new SkipSection(randomBoolean() ? null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); }
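
As a minimal sketch of the LegacyESVersion-to-Version migration shown in the test framework diffs above (assuming org.opensearch.Version exposes fromString, CURRENT, and before as the updated tests use them; the class name VersionParsingSketch is hypothetical, not part of the patch):

import org.opensearch.Version;

public class VersionParsingSketch {
    public static void main(String[] args) {
        // Parse a pre-fork version string directly, as the updated tests now do
        // instead of going through LegacyESVersion.fromString("6.0.0").
        Version legacy = Version.fromString("6.0.0");

        // Compare it against the running version; a 6.x wire version predates the
        // current OpenSearch release, which is what the skip and handshake tests rely on.
        System.out.println(legacy + " before " + Version.CURRENT + "? " + legacy.before(Version.CURRENT));
    }
}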