From beb09af65710aa17909f977a61b7a5414f7967cc Mon Sep 17 00:00:00 2001
From: Kartik Ganesh
Date: Tue, 30 Aug 2022 11:17:11 -0700
Subject: [PATCH 01/16] Adding @dreamer-89 to Opensearch maintainers. (#4342)

Signed-off-by: Kartik Ganesh

Signed-off-by: Kartik Ganesh
---
 CHANGELOG.md   | 1 +
 MAINTAINERS.md | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b3c5d731af082..c7f17dac5bf13 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ## [Unreleased]
 ### Added
 - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342))

 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 94e649a634c7f..2f54656b2ab59 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -23,6 +23,7 @@
 | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon |
 | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon |
 | Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon |
+| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon |
 | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon |
 | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon |
 | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon |

From 4bccdbe9bdf474d79d3ff9e68e53267174b001ac Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Tue, 30 Aug 2022 13:21:15 -0700
Subject: [PATCH 02/16] [CVE] Update snakeyaml dependency (#4341)

The package `org.yaml:snakeyaml` before version 1.31 is vulnerable to
Denial of Service (DoS) due to a missing nested depth limitation for
collections.
Details at https://nvd.nist.gov/vuln/detail/CVE-2022-25857

Signed-off-by: Rabi Panda
---
 CHANGELOG.md                                    | 1 +
 buildSrc/version.properties                     | 2 +-
 libs/x-content/licenses/snakeyaml-1.26.jar.sha1 | 1 -
 libs/x-content/licenses/snakeyaml-1.31.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 libs/x-content/licenses/snakeyaml-1.26.jar.sha1
 create mode 100644 libs/x-content/licenses/snakeyaml-1.31.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c7f17dac5bf13..877e472ac66d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))

 ### Security
+- CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))

 ## [2.x]
 ### Added

diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 876910d5351d0..072dcc4578977 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -11,7 +11,7 @@ spatial4j = 0.7
 jts = 1.15.0
 jackson = 2.13.3
 jackson_databind = 2.13.3
-snakeyaml = 1.26
+snakeyaml = 1.31
 icu4j = 70.1
 supercsv = 2.4.0
 log4j = 2.17.1

diff --git a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.26.jar.sha1
deleted file mode 100644
index fde3aba8edad0..0000000000000
--- a/libs/x-content/licenses/snakeyaml-1.26.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a78a8747147d2c5807683e76ec2b633e95c14fe9
\ No newline at end of file

diff --git a/libs/x-content/licenses/snakeyaml-1.31.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1
new file mode 100644
index 0000000000000..1ac9b78b88687
--- /dev/null
+++ b/libs/x-content/licenses/snakeyaml-1.31.jar.sha1
@@ -0,0 +1 @@
+cf26b7b05fef01e7bec00cb88ab4feeeba743e12
\ No newline at end of file

From 82bda895ccf039c48ea73c68c845908cd6289381 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Tue, 30 Aug 2022 15:33:07 -0700
Subject: [PATCH 03/16] Fixed commit workflow for dependabot PR helper (#4331)

* Fixed label for dependabot PR helper

Signed-off-by: Kunal Kotwani

* Update autocommit workflow for dependabot changelog

Signed-off-by: Kunal Kotwani

* Add version config for dependabot changelog helper

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
---
 .github/workflows/changelog_verifier.yml | 6 ++++++
 CHANGELOG.md                             | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
index ee9bf5e18d0d5..ac0c0ec4d7297 100644
--- a/.github/workflows/changelog_verifier.yml
+++ b/.github/workflows/changelog_verifier.yml
@@ -14,9 +14,15 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}

       - uses: dangoslen/dependabot-changelog-helper@v1
+        with:
+          version: 'Unreleased'

       - uses: stefanzweifel/git-auto-commit-action@v4
         with:
           commit_message: "Update changelog"
+          branch: ${{ github.head_ref }}
+          commit_user_name: dependabot[bot]
+          commit_user_email: support@github.com
+          commit_options: '--signoff'

       - uses: dangoslen/changelog-enforcer@v3

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 877e472ac66d4..9efafb1e69a50 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307))
 - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133))
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
+- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))

 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
@@ -38,6 +39,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)

 ### Fixed
 - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296))
+- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))

 ### Security

From 48d68699299346873d12bcfaf1013c148a0fe166 Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Tue, 30 Aug 2022 15:41:59 -0700
Subject: [PATCH 04/16] Add release notes for patch release 1.3.5 (#4343)

Signed-off-by: Rabi Panda
---
 CHANGELOG.md                                    | 1 +
 release-notes/opensearch.release-notes-1.3.5.md | 9 +++++++++
 2 files changed, 10 insertions(+)
 create mode 100644 release-notes/opensearch.release-notes-1.3.5.md

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9efafb1e69a50..8c000c02e86ba 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 ### Added
 - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
 - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342))
+- Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343))

 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))

diff --git a/release-notes/opensearch.release-notes-1.3.5.md b/release-notes/opensearch.release-notes-1.3.5.md
new file mode 100644
index 0000000000000..fbf866bb6e112
--- /dev/null
+++ b/release-notes/opensearch.release-notes-1.3.5.md
@@ -0,0 +1,9 @@
+## 2022-08-30 Version 1.3.5 Release Notes
+
+### Upgrades
+* OpenJDK Update (July 2022 Patch releases) ([#4097](https://github.com/opensearch-project/OpenSearch/pull/4097))
+* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/OpenSearch/pull/3868))
+
+### Bug Fixes
+* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/OpenSearch/pull/4143))
+* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150))

From f16ea9c8ec0a181c77b57388b2f3a322bcb47814 Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Tue, 30 Aug 2022 16:25:39 -0700
Subject: [PATCH 05/16] Add release notes for patch release 2.2.1 (#4344)

Signed-off-by: Rabi Panda
---
 CHANGELOG.md                                    | 1 +
 release-notes/opensearch.release-notes-2.2.1.md | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 release-notes/opensearch.release-notes-2.2.1.md

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c000c02e86ba..3b3d54a802e67 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) +- Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) diff --git a/release-notes/opensearch.release-notes-2.2.1.md b/release-notes/opensearch.release-notes-2.2.1.md new file mode 100644 index 0000000000000..974ff8e09a426 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.1.md @@ -0,0 +1,7 @@ +## 2022-08-30 Version 2.2.1 Release Notes + +### Upgrades +* Update Gradle to 7.5.1 ([#4211](https://github.com/opensearch-project/OpenSearch/pull/4211)) + +### Bug Fixes +* gradle check failing with java heap OutOfMemoryError ([#4150](https://github.com/opensearch-project/OpenSearch/pull/4150)) From 4f65ef58ef7e4836b93cbe12afcafdd07fea12a8 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 30 Aug 2022 17:34:56 -0700 Subject: [PATCH 06/16] Add label configuration for dependabot PRs (#4348) --- .github/dependabot.yml | 522 +++++++++++++++++++++++++++++++++++++++++ CHANGELOG.md | 2 + 2 files changed, 524 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9682461d9e110..07755ef69c6a3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,869 +4,1391 @@ updates: package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /benchmarks/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/archives/oss-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/buildSrc/src/integTest/resources/org/opensearch/gradle/internal/fake_git/remote/distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch-build-resources/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/opensearch.build/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/reaper/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/symbolic-link-preserving-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/testingConventions/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /buildSrc/src/testKit/thirdPartyAudit/sample_jars/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/benchmark/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/client-benchmark-noop-api-plugin/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/rest-high-level/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/sniffer/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /client/test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/integ-test-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-arm64-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/distribution/archives/no-jdk-darwin-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-linux-tar/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/no-jdk-windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/archives/windows-zip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/bugfix/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/maintenance/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/minor/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/bwc/staged/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-arm64-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-build-context/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/docker/docker-export/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/arm64-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-deb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/no-jdk-rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/packages/rpm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/java-version-checker/ open-pull-requests-limit: 1 package-ecosystem: gradle 
schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/keystore-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/launchers/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/plugin-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /distribution/tools/upgrade-cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /doc-tools/missing-doclet/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/cli/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/core/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/dissect/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/grok/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/plugin-classloader/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/secure-sm/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/ssl-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /libs/x-content/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/aggs-matrix-stats/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/analysis-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/geo/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-common/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - 
"dependabot" + - "dependencies" - directory: /modules/ingest-geoip/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/ingest-user-agent/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-expression/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-mustache/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/lang-painless/spi/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/mapper-extras/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/opensearch-dashboards/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/parent-join/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/percolator/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/rank-eval/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/reindex/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/repository-url/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/systemd/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /modules/transport-netty4/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-icu/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-kuromoji/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-nori/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-phonetic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-smartcn/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-stempel/ open-pull-requests-limit: 1 
package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/analysis-ukrainian/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-azure-classic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-ec2/qa/amazon-ec2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/discovery-gce/qa/gce/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-settings/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-significance-heuristic/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/custom-suggester/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/painless-allowlist/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rescore/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/rest-handler/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/examples/script-expert-scoring/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ingest-attachment/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-annotated-text/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-murmur3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/mapper-size/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/plugins/repository-azure/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-gcs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-hdfs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/repository-s3/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/store-smb/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/transport-nio/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ccs-unavailable-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/die-with-dignity/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/evil-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/full-cluster-restart/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/logging-config/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/mixed-cluster/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/multi-cluster-search/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/no-bootstrap-tests/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-6/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/centos-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-8/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/debian-9/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-28/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/fedora-29/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-6/ 
open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/oel-7/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/sles-12/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1604/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/ubuntu-1804/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2012r2/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/os/windows-2016/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/remote-clusters/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/repository-multi-version/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/rolling-upgrade/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-http/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-disabled/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-ingest-with-all-dependencies/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-multinode/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/smoke-test-plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/translog-policy/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/unconfigured-node-name/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/verify-version-constants/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/wildfly/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /rest-api-spec/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/libs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: 
/sandbox/modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /sandbox/plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /server/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/external-modules/delayed-aggs/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/azure-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/gcs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/hdfs-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/krb5kdc-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/minio-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/old-elasticsearch/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/fixtures/s3-fixture/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/framework/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /test/logger-usage/ open-pull-requests-limit: 1 package-ecosystem: gradle schedule: interval: weekly + labels: + - "dependabot" + - "dependencies" version: 2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b3d54a802e67..1be3d3f53f2d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) +- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) ### Changed - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) @@ -32,6 +33,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [2.x] ### Added - Github workflow for 
changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))

 ### Changed

From d72861f9de379d2a263948232947e9b95aefa962 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Wed, 31 Aug 2022 09:07:44 -0400
Subject: [PATCH 07/16] Support for HTTP/2 (server-side) (#3847)

* Support for HTTP/2 (server-side)

Signed-off-by: Andriy Redko

* Addressing code review comments

Signed-off-by: Andriy Redko

* Added HTTP/1.1 channel configuration

Signed-off-by: Andriy Redko

* Addressing code review comments

Signed-off-by: Andriy Redko

* Update pull request URL in CHANGELOG.md

Signed-off-by: Andriy Redko

Signed-off-by: Andriy Redko
---
 CHANGELOG.md                                  |   1 +
 modules/transport-netty4/build.gradle         |   1 +
 .../netty-codec-http2-4.1.79.Final.jar.sha1   |   1 +
 .../opensearch/http/netty4/Netty4Http2IT.java |  62 ++++++
 .../netty4/Netty4HttpRequestSizeLimitIT.java  |   4 +-
 .../http/netty4/Netty4PipeliningIT.java       |   2 +-
 .../http/netty4/Netty4HttpChannel.java        |  13 ++
 .../netty4/Netty4HttpServerTransport.java     | 160 ++++++++++++--
 .../http/netty4/Netty4BadRequestTests.java    |   2 +-
 .../http/netty4/Netty4HttpClient.java         | 197 +++++++++++++++++-
 .../Netty4HttpServerPipeliningTests.java      |   7 +-
 .../Netty4HttpServerTransportTests.java       |  10 +-
 12 files changed, 428 insertions(+), 32 deletions(-)
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1be3d3f53f2d6..8e7fa8b5547f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343))
 - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344))
 - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348))
+- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))

 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))

diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index b72cb6d868d79..5d2047d7f18a2 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -58,6 +58,7 @@ dependencies {
   api "io.netty:netty-buffer:${versions.netty}"
   api "io.netty:netty-codec:${versions.netty}"
   api "io.netty:netty-codec-http:${versions.netty}"
+  api "io.netty:netty-codec-http2:${versions.netty}"
   api "io.netty:netty-common:${versions.netty}"
   api "io.netty:netty-handler:${versions.netty}"
   api "io.netty:netty-resolver:${versions.netty}"

diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
new file mode 100644
index 0000000000000..f2989024cfce1
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
@@ -0,0 +1 @@
+0eeffab0cd5efb699d5e4ab9b694d32fef6694b3
\ No newline at end of file

diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
new file mode 100644
index 0000000000000..1424b392af8e7
--- /dev/null
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.netty4;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.util.ReferenceCounted;
+import org.opensearch.OpenSearchNetty4IntegTestCase;
+import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+
+import java.util.Collection;
+import java.util.Locale;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.hasSize;
+
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+public class Netty4Http2IT extends OpenSearchNetty4IntegTestCase {
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    public void testThatNettyHttpServerSupportsHttp2() throws Exception {
+        String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" };
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
+        TransportAddress transportAddress = randomFrom(boundAddresses);
+
+        try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) {
+            Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests);
+            try {
+                assertThat(responses, hasSize(5));
+
+                Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
+                assertOpaqueIdsInAnyOrder(opaqueIds);
+            } finally {
+                responses.forEach(ReferenceCounted::release);
+            }
+        }
+    }
+
+    private void assertOpaqueIdsInAnyOrder(Collection<String> opaqueIds) {
+        // check if opaque ids are present in any order, since for HTTP/2 we use streaming (no head of line blocking)
+        // and responses may come back in any order
+        int i = 0;
+        String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be in any order, got [%s]", opaqueIds);
+        assertThat(msg, opaqueIds, containsInAnyOrder(IntStream.range(0, 5).mapToObj(Integer::toString).toArray()));
+    }
+
+}

diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
index 08df9259d475f..db76c0b145840 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
@@ -100,7 +100,7 @@ public void testLimitsInFlightRequests() throws Exception {
         HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
         TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());

-        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
+        try
(Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1)); try { assertThat(singleResponse, hasSize(1)); @@ -130,7 +130,7 @@ public void testDoesNotLimitExcludedRequests() throws Exception { HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.put(transportAddress.address(), requestUris); try { assertThat(responses, hasSize(requestUris.size())); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java index 2bd1fa07f8afc..96193b0ecb954 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java @@ -61,7 +61,7 @@ public void testThatNettyHttpServerSupportsPipelining() throws Exception { TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); TransportAddress transportAddress = randomFrom(boundAddresses); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests); try { assertThat(responses, hasSize(5)); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 66d60032d11a8..2dd7aaf41986f 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -33,7 +33,10 @@ package org.opensearch.http.netty4; import io.netty.channel.Channel; +import io.netty.channel.ChannelPipeline; + import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.CompletableContext; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; @@ -45,9 +48,15 @@ public class Netty4HttpChannel implements HttpChannel { private final Channel channel; private final CompletableContext closeContext = new CompletableContext<>(); + private final ChannelPipeline inboundPipeline; Netty4HttpChannel(Channel channel) { + this(channel, null); + } + + Netty4HttpChannel(Channel channel, ChannelPipeline inboundPipeline) { this.channel = channel; + this.inboundPipeline = inboundPipeline; Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @@ -81,6 +90,10 @@ public void close() { channel.close(); } + public @Nullable ChannelPipeline inboundPipeline() { + return inboundPipeline; + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index decab45ffca38..1e0a4d89f2fd5 100644 --- 
a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -40,18 +40,36 @@ import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator; +import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.socket.nio.NioChannelOption; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpMessage; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; +import io.netty.handler.codec.http2.CleartextHttp2ServerUpgradeHandler; +import io.netty.handler.codec.http2.Http2CodecUtil; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2ServerUpgradeCodec; +import io.netty.handler.codec.http2.Http2StreamFrameToHttpObjectCodec; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.util.AsciiString; import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; @@ -335,38 +353,152 @@ protected HttpChannelHandler(final Netty4HttpServerTransport transport, final Ht this.responseCreator = new Netty4HttpResponseCreator(); } + public ChannelHandler getRequestHandler() { + return requestHandler; + } + @Override protected void initChannel(Channel ch) throws Exception { Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(ch); ch.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); ch.pipeline().addLast("byte_buf_sizer", byteBufSizer); ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); + + configurePipeline(ch); + transport.serverAcceptedChannel(nettyHttpChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + super.exceptionCaught(ctx, cause); + } + + protected void configurePipeline(Channel ch) { + final UpgradeCodecFactory upgradeCodecFactory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + if (AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) { + return new Http2ServerUpgradeCodec( + Http2FrameCodecBuilder.forServer().build(), + new Http2MultiplexHandler(createHttp2ChannelInitializer(ch.pipeline())) + ); + } else { + return null; + } + } + }; + + final HttpServerCodec sourceCodec = new HttpServerCodec( + handlingSettings.getMaxInitialLineLength(), 
+ handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize() + ); + + final HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(sourceCodec, upgradeCodecFactory); + final CleartextHttp2ServerUpgradeHandler cleartextUpgradeHandler = new CleartextHttp2ServerUpgradeHandler( + sourceCodec, + upgradeHandler, + createHttp2ChannelInitializerPriorKnowledge() + ); + + ch.pipeline().addLast(cleartextUpgradeHandler).addLast(new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws Exception { + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP + final ChannelPipeline pipeline = ctx.pipeline(); + pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); + pipeline.replace(this, "aggregator", aggregator); + + ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); + ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + if (handlingSettings.isCompression()) { + ch.pipeline() + .addAfter("aggregator", "encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + ch.pipeline().addBefore("handler", "request_creator", requestCreator); + ch.pipeline().addBefore("handler", "response_creator", responseCreator); + ch.pipeline() + .addBefore("handler", "pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + + ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); + } + }); + } + + protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { final HttpRequestDecoder decoder = new HttpRequestDecoder( handlingSettings.getMaxInitialLineLength(), handlingSettings.getMaxHeaderSize(), handlingSettings.getMaxChunkSize() ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); - ch.pipeline().addLast("decoder", decoder); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); - ch.pipeline().addLast("encoder", new HttpResponseEncoder()); + pipeline.addLast("decoder", decoder); + pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - ch.pipeline().addLast("aggregator", aggregator); + pipeline.addLast("aggregator", aggregator); if (handlingSettings.isCompression()) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + pipeline.addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } - ch.pipeline().addLast("request_creator", requestCreator); - ch.pipeline().addLast("response_creator", responseCreator); - ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); - ch.pipeline().addLast("handler", requestHandler); - transport.serverAcceptedChannel(nettyHttpChannel); + pipeline.addLast("request_creator", requestCreator); + pipeline.addLast("response_creator", responseCreator); + pipeline.addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)); + pipeline.addLast("handler", requestHandler); } - 
@Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - super.exceptionCaught(ctx, cause); + protected void configureDefaultHttp2Pipeline(ChannelPipeline pipeline) { + pipeline.addLast(Http2FrameCodecBuilder.forServer().build()) + .addLast(new Http2MultiplexHandler(createHttp2ChannelInitializer(pipeline))); + } + + private ChannelInitializer createHttp2ChannelInitializerPriorKnowledge() { + return new ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + configureDefaultHttp2Pipeline(childChannel.pipeline()); + } + }; + } + + /** + * Http2MultiplexHandler creates a new pipeline; we preserve the old one in case some handlers need to be + * accessed (for example, the opensearch-security plugin accesses the SSL handlers). + */ + private ChannelInitializer createHttp2ChannelInitializer(ChannelPipeline inboundPipeline) { + return new ChannelInitializer() { + @Override + protected void initChannel(Channel childChannel) throws Exception { + final Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(childChannel, inboundPipeline); + childChannel.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); + + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); + + childChannel.pipeline() + .addLast(new LoggingHandler(LogLevel.DEBUG)) + .addLast(new Http2StreamFrameToHttpObjectCodec(true)) + .addLast("byte_buf_sizer", byteBufSizer) + .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) + .addLast("decoder_decompress", new HttpContentDecompressor()); + + if (handlingSettings.isCompression()) { + childChannel.pipeline() + .addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); + } + + childChannel.pipeline() + .addLast("aggregator", aggregator) + .addLast("request_creator", requestCreator) + .addLast("response_creator", responseCreator) + .addLast("pipelining", new Netty4HttpPipeliningHandler(logger, transport.pipeliningMaxEvents)) + .addLast("handler", getRequestHandler()); + } + }; } } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index a0100930c7dcb..c18fe6efc4736 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -117,7 +117,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, httpServerTransport.start(); final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { final Collection responses = nettyHttpClient.get( transportAddress.address(), "/_cluster/settings?pretty=%" diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index 57f95a022a33f..6fdd698c117f2 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ 
b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -37,14 +37,19 @@ import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpClientUpgradeHandler; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; @@ -55,6 +60,17 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.DefaultHttp2Connection; +import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener; +import io.netty.handler.codec.http2.Http2ClientUpgradeCodec; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler; +import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandlerBuilder; +import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder; +import io.netty.util.AttributeKey; + import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; @@ -70,6 +86,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; @@ -97,11 +114,32 @@ static Collection returnOpaqueIds(Collection responses } private final Bootstrap clientBootstrap; + private final BiFunction, AwaitableChannelInitializer> handlerFactory; + + Netty4HttpClient( + Bootstrap clientBootstrap, + BiFunction, AwaitableChannelInitializer> handlerFactory + ) { + this.clientBootstrap = clientBootstrap; + this.handlerFactory = handlerFactory; + } + + static Netty4HttpClient http() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp::new + ); + } - Netty4HttpClient() { - clientBootstrap = new Bootstrap().channel(NettyAllocator.getChannelType()) - .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) - .group(new NioEventLoopGroup(1)); + static Netty4HttpClient http2() { + return new Netty4HttpClient( + new Bootstrap().channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)), + CountDownLatchHandlerHttp2::new + ); } public List get(SocketAddress remoteAddress, String... uris) throws InterruptedException { @@ -110,6 +148,7 @@ public List get(SocketAddress remoteAddress, String... 
uris) t final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); httpRequest.headers().add(HOST, "localhost"); httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(httpRequest); } return sendRequests(remoteAddress, requests); @@ -143,6 +182,7 @@ private List processRequestsWithBody( request.headers().add(HttpHeaderNames.HOST, "localhost"); request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); requests.add(request); } return sendRequests(remoteAddress, requests); @@ -153,12 +193,14 @@ private synchronized List sendRequests(final SocketAddress rem final CountDownLatch latch = new CountDownLatch(requests.size()); final List content = Collections.synchronizedList(new ArrayList<>(requests.size())); - clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + final AwaitableChannelInitializer handler = handlerFactory.apply(latch, content); + clientBootstrap.handler(handler); ChannelFuture channelFuture = null; try { channelFuture = clientBootstrap.connect(remoteAddress); channelFuture.sync(); + handler.await(); for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); @@ -184,12 +226,12 @@ public void close() { /** * helper factory which adds returned data to a list and uses a count down latch to decide when done */ - private static class CountDownLatchHandler extends ChannelInitializer { + private static class CountDownLatchHandlerHttp extends AwaitableChannelInitializer { private final CountDownLatch latch; private final Collection content; - CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + CountDownLatchHandlerHttp(final CountDownLatch latch, final Collection content) { this.latch = latch; this.content = content; } @@ -222,4 +264,145 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } + /** + * A channel initializer with the ability to await until initialization has completed + * + */ + private static abstract class AwaitableChannelInitializer extends ChannelInitializer { + void await() { + // do nothing + } + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandlerHttp2 extends AwaitableChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + private Http2SettingsHandler settingsHandler; + + CountDownLatchHandlerHttp2(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + final Http2Connection connection = new DefaultHttp2Connection(false); + settingsHandler = new Http2SettingsHandler(ch.newPromise()); + + final ChannelInboundHandler responseHandler = new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { + final FullHttpResponse response = (FullHttpResponse) msg; + + // this is the response to the initial upgrade request, skip it + if (Boolean.TRUE.equals(ctx.channel().attr(AttributeKey.valueOf("upgrade")).getAndRemove())) { + return; + } + + // We copy the 
buffer manually to avoid a huge allocation on a pooled allocator. We have + // a test that tracks huge allocations, so we want to avoid them in this test code. + ByteBuf newContent = Unpooled.copiedBuffer(((FullHttpResponse) msg).content()); + content.add(response.replace(newContent)); + latch.countDown(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }; + + final HttpToHttp2ConnectionHandler connectionHandler = new HttpToHttp2ConnectionHandlerBuilder().connection(connection) + .frameListener( + new DelegatingDecompressorFrameListener( + connection, + new InboundHttp2ToHttpAdapterBuilder(connection).maxContentLength(maxContentLength).propagateSettings(true).build() + ) + ) + .build(); + + final HttpClientCodec sourceCodec = new HttpClientCodec(); + final Http2ClientUpgradeCodec upgradeCodec = new Http2ClientUpgradeCodec(connectionHandler); + final HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, maxContentLength); + + ch.pipeline().addLast(sourceCodec); + ch.pipeline().addLast(upgradeHandler); + ch.pipeline().addLast(new HttpContentDecompressor()); + ch.pipeline().addLast(new UpgradeRequestHandler(settingsHandler, responseHandler)); + } + + @Override + void await() { + try { + // Wait for the HTTP/2 settings to arrive before moving on to sending the requests + settingsHandler.awaitSettings(5, TimeUnit.SECONDS); + } catch (final Exception ex) { + throw new RuntimeException(ex); + } + } + } + + /** + * A handler that triggers the cleartext upgrade to HTTP/2 (h2c) by sending an + * initial HTTP request. + */ + private static class UpgradeRequestHandler extends ChannelInboundHandlerAdapter { + private final ChannelInboundHandler settingsHandler; + private final ChannelInboundHandler responseHandler; + + UpgradeRequestHandler(final ChannelInboundHandler settingsHandler, final ChannelInboundHandler responseHandler) { + this.settingsHandler = settingsHandler; + this.responseHandler = responseHandler; + } + + @Override + public void channelActive(ChannelHandlerContext ctx) throws Exception { + // The first request is the HTTP/2 protocol upgrade (since only h2c is supported here) + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + ctx.channel().attr(AttributeKey.newInstance("upgrade")).set(true); + ctx.writeAndFlush(request); + ctx.fireChannelActive(); + + ctx.pipeline().remove(this); + ctx.pipeline().addLast(settingsHandler); + ctx.pipeline().addLast(responseHandler); + } + } + + private static class Http2SettingsHandler extends SimpleChannelInboundHandler { + private ChannelPromise promise; + + Http2SettingsHandler(ChannelPromise promise) { + this.promise = promise; + } + + /** + * Wait for this handler to be added after the upgrade to HTTP/2, and for the initial preface + * handshake to complete. 
+ */ + void awaitSettings(long timeout, TimeUnit unit) throws Exception { + if (!promise.awaitUninterruptibly(timeout, unit)) { + throw new IllegalStateException("Timed out waiting for HTTP/2 settings"); + } + if (!promise.isSuccess()) { + throw new RuntimeException(promise.cause()); + } + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Settings msg) throws Exception { + promise.setSuccess(); + ctx.pipeline().remove(this); + } + } + } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index 029aed1f3cc89..cda66b8d828fa 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -109,7 +109,7 @@ public void testThatHttpPipeliningWorks() throws Exception { } } - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[] {})); try { Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); @@ -163,9 +163,12 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - ch.pipeline().replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService)); } + @Override + public ChannelHandler getRequestHandler() { + return new PossiblySlowUpstreamHandler(executorService); + } } class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index ec879e538fe20..eb96f14f10c70 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -202,7 +202,7 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, ) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); request.headers().set(HttpHeaderNames.EXPECT, expectation); HttpUtil.setContentLength(request, contentLength); @@ -322,7 +322,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); @@ -384,7 +384,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final 
TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip")); long numOfHugeAllocations = getHugeAllocationCount(); @@ -454,7 +454,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); // Test pre-flight request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); @@ -471,7 +471,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } // Test short-circuited request - try (Netty4HttpClient client = new Netty4HttpClient()) { + try (Netty4HttpClient client = Netty4HttpClient.http()) { final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); request.headers().add(CorsHandler.ORIGIN, "google.com"); From c28221e0176ad3ce782c18a2b23ea2e59ed0e0a8 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 31 Aug 2022 09:41:37 -0700 Subject: [PATCH 08/16] Fix token usage for changelog helper (#4351) Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 13 ++++++++++++- CHANGELOG.md | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index ac0c0ec4d7297..fb4f8ea3f8ecc 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -7,10 +7,21 @@ jobs: # Enforces the update of a changelog file on every pull request verify-changelog: runs-on: ubuntu-latest + permissions: + pull-requests: write + contents: write steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.5.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + - uses: actions/checkout@v3 with: - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ steps.github_app_token.outputs.token }} ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/dependabot-changelog-helper@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e7fa8b5547f0..c9b8b1041bd9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Token usage for dependabot changelog helper ([#4351](https://github.com/opensearch-project/OpenSearch/pull/4351)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 
100120a440c63ac8ebe4cf4bad51de29ce54ebf9 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 31 Aug 2022 11:55:49 -0700 Subject: [PATCH 09/16] Revert "Fix token usage for changelog helper (#4351)" (#4361) This reverts commit c28221e0176ad3ce782c18a2b23ea2e59ed0e0a8. Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/workflows/changelog_verifier.yml | 13 +------------ CHANGELOG.md | 1 - 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index fb4f8ea3f8ecc..ac0c0ec4d7297 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -7,21 +7,10 @@ jobs: # Enforces the update of a changelog file on every pull request verify-changelog: runs-on: ubuntu-latest - permissions: - pull-requests: write - contents: write steps: - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v1.5.0 - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 - - uses: actions/checkout@v3 with: - token: ${{ steps.github_app_token.outputs.token }} + token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - uses: dangoslen/dependabot-changelog-helper@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c9b8b1041bd9e..8e7fa8b5547f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,6 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) -- Token usage for dependabot changelog helper ([#4351](https://github.com/opensearch-project/OpenSearch/pull/4351)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 19d1a2b027fef8b981560969bf428476d700bd07 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 31 Aug 2022 13:12:24 -0700 Subject: [PATCH 10/16] Segment Replication - Implement segment replication event cancellation. (#4225) * Segment Replication. Fix cancellation of replication events. This PR updates the segment replication paths to correctly cancel replication events on the primary and the replica. In the source service, any ongoing event for a primary that is sending to a replica that shuts down or is promoted as a new primary is cancelled. In the target service, any ongoing event is cancelled for a replica that is promoted as a new primary or is fetching from a primary that shuts down. It wires up SegmentReplicationSourceService as an IndexEventListener so that it can respond to events and cancel any ongoing transfer state. This change also includes some test cleanup for segment replication to rely on actual components over mocks. Signed-off-by: Marc Handalian Fix to not start/stop SegmentReplicationSourceService as a lifecycle component with the feature flag off. Signed-off-by: Marc Handalian Update logic to properly mark SegmentReplicationTarget as cancelled when a cancel is initiated by the primary. Signed-off-by: Marc Handalian Minor updates from self review. Signed-off-by: Marc Handalian * Add missing changelog entry. 
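At its core the cancellation rides on CancellableThreads: each side registers an onCancel hook that fails the pending listener, and calls checkForCancel() before each network round trip. A rough sketch of the pattern, with hypothetical stand-in names ('responseListener' and 'nextStep' are illustrative, not code from this PR):

    // sketch only; mirrors the setOnCancel/checkForCancel usage in the diff below
    final CancellableThreads cancellableThreads = new CancellableThreads();
    cancellableThreads.setOnCancel((reason, beforeCancelEx) -> {
        final RuntimeException e = new CancellableThreads.ExecutionCancelledException(
            "replication was canceled reason [" + reason + "]"
        );
        if (beforeCancelEx != null) {
            e.addSuppressed(beforeCancelEx);
        }
        responseListener.onFailure(e); // resolve the waiting caller with a cancellation failure
        throw e;                       // abort the current replication work
    });
    cancellableThreads.checkForCancel(); // throws immediately if cancel(reason) was already called
    nextStep();                          // e.g. fetch checkpoint metadata or segment files

Both SegmentReplicationSourceHandler and SegmentReplicationTarget in the diff below follow this shape, which is why a promotion or shard close observed by either service unwinds the in-flight transfer instead of failing the shard. 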
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + .../cluster/IndicesClusterStateService.java | 5 + .../OngoingSegmentReplications.java | 22 +- .../PrimaryShardReplicationSource.java | 6 + .../replication/SegmentReplicationSource.java | 6 + .../SegmentReplicationSourceHandler.java | 11 + .../SegmentReplicationSourceService.java | 44 ++- .../replication/SegmentReplicationState.java | 15 +- .../replication/SegmentReplicationTarget.java | 31 +- .../SegmentReplicationTargetService.java | 53 +++- .../main/java/org/opensearch/node/Node.java | 7 + .../SegmentReplicationIndexShardTests.java | 275 ++++++++++++++++++ ...ClusterStateServiceRandomUpdatesTests.java | 2 + .../OngoingSegmentReplicationsTests.java | 49 ++++ .../PrimaryShardReplicationSourceTests.java | 37 +++ .../SegmentReplicationSourceHandlerTests.java | 46 +++ .../SegmentReplicationTargetServiceTests.java | 200 +++++-------- .../snapshots/SnapshotResiliencyTests.java | 2 + .../index/shard/IndexShardTestCase.java | 118 +++++--- 19 files changed, 737 insertions(+), 193 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e7fa8b5547f0..4d07052d55ff0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 8884ef2cddd0a..15a9bf9e4c492 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -81,6 +81,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; @@ -152,6 +153,7 @@ public IndicesClusterStateService( final ThreadPool threadPool, final PeerRecoveryTargetService recoveryTargetService, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, @@ -170,6 +172,7 @@ public IndicesClusterStateService( threadPool, checkpointPublisher, segmentReplicationTargetService, + segmentReplicationSourceService, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -191,6 +194,7 @@ public 
IndicesClusterStateService( final ThreadPool threadPool, final SegmentReplicationCheckpointPublisher checkpointPublisher, final SegmentReplicationTargetService segmentReplicationTargetService, + final SegmentReplicationSourceService segmentReplicationSourceService, final PeerRecoveryTargetService recoveryTargetService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -211,6 +215,7 @@ public IndicesClusterStateService( // if segrep feature flag is not enabled, don't wire the target service as an IndexEventListener. if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { indexEventListeners.add(segmentReplicationTargetService); + indexEventListeners.add(segmentReplicationSourceService); } this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index dfebe5f7cabf2..828aa29192fe3 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -37,7 +37,6 @@ * @opensearch.internal */ class OngoingSegmentReplications { - private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; @@ -161,6 +160,20 @@ synchronized void cancel(IndexShard shard, String reason) { cancelHandlers(handler -> handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); } + /** + * Cancel all replication events for the given allocation ID, intended to be called when a primary is shutting down. + * + * @param allocationId {@link String} - Allocation ID. 
+ * @param reason {@link String} - Reason for the cancellation + */ + synchronized void cancel(String allocationId, String reason) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + /** * Cancel any ongoing replications for a given {@link DiscoveryNode} * @@ -168,7 +181,6 @@ synchronized void cancel(IndexShard shard, String reason) { */ void cancelReplication(DiscoveryNode node) { cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); - } /** @@ -243,11 +255,7 @@ private void cancelHandlers(Predicate p .map(SegmentReplicationSourceHandler::getAllocationId) .collect(Collectors.toList()); for (String allocationId : allocationIds) { - final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); - if (handler != null) { - handler.cancel(reason); - removeCopyState(handler.getCopyState()); - } + cancel(allocationId, reason); } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 08dc0b97b31d5..aa0b5416dd0ff 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -87,4 +87,10 @@ public void getSegmentFiles( ); transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader); } + + @Override + public void cancel() { + transportClient.cancel(); + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 8628a266ea7d0..b2e7487fff4b2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.opensearch.action.ActionListener; +import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -47,4 +48,9 @@ void getSegmentFiles( Store store, ActionListener listener ); + + /** + * Cancel any ongoing requests; any pending listeners should be resolved via onFailure with an {@link ExecutionCancelledException}. 
+ */ + default void cancel() {} } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 2d21653c1924c..022d90b41d8ee 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -113,6 +113,16 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final Closeable releaseResources = () -> IOUtils.close(resources); try { timer.start(); + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + final RuntimeException e = new CancellableThreads.ExecutionCancelledException( + "replication was canceled reason [" + reason + "]" + ); + if (beforeCancelEx != null) { + e.addSuppressed(beforeCancelEx); + } + IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + throw e; + }); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); @@ -153,6 +163,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene final MultiChunkTransfer transfer = segmentFileTransferHandler .createTransfer(shard.store(), storeFileMetadata, () -> 0, sendFileStep); resources.add(transfer); + cancellableThreads.checkForCancel(); transfer.start(); sendFileStep.whenComplete(r -> { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 0cee731fde2cb..db3f87201b774 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -42,7 +43,25 @@ * * @opensearch.internal */ -public final class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { +public class SegmentReplicationSourceService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { + + // Empty Implementation, only required while Segment Replication is under feature flag. 
+ public static final SegmentReplicationSourceService NO_OP = new SegmentReplicationSourceService() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + // NoOp; + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + // NoOp; + } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // NoOp; + } + }; private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class); private final RecoverySettings recoverySettings; @@ -62,6 +81,14 @@ public static class Actions { private final OngoingSegmentReplications ongoingSegmentReplications; + // Used only for empty implementation. + private SegmentReplicationSourceService() { + recoverySettings = null; + ongoingSegmentReplications = null; + transportService = null; + indicesService = null; + } + public SegmentReplicationSourceService( IndicesService indicesService, TransportService transportService, @@ -163,10 +190,25 @@ protected void doClose() throws IOException { } + /** + * + * Cancels any replications on this node to a replica shard that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { ongoingSegmentReplications.cancel(indexShard, "shard is closed"); } } + + /** + * Cancels any replications on this node to a replica that has been promoted as primary. + */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (indexShard != null && oldRouting.primary() == false && newRouting.primary()) { + ongoingSegmentReplications.cancel(indexShard.routingEntry().allocationId().getId(), "Relocating primary shard."); + } + } + } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index f865ba1332186..2e2e6df007c5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -35,7 +35,8 @@ public enum Stage { GET_CHECKPOINT_INFO((byte) 3), FILE_DIFF((byte) 4), GET_FILES((byte) 5), - FINALIZE_REPLICATION((byte) 6); + FINALIZE_REPLICATION((byte) 6), + CANCELLED((byte) 7); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -118,6 +119,10 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + stopTimersAndSetStage(next); + } + + private void stopTimersAndSetStage(Stage next) { // save the timing data for the current step stageTimer.stop(); timingData.add(new Tuple<>(stage.name(), stageTimer.time())); @@ -155,6 +160,14 @@ public void setStage(Stage stage) { overallTimer.stop(); timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; + case CANCELLED: + if (this.stage == Stage.DONE) { + throw new IllegalStateException("can't move replication to Cancelled state from Done."); + } + stopTimersAndSetStage(Stage.CANCELLED); + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); + break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index a658ffc09d590..d1d6104a416ca 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -17,6 +17,7 @@ import org.apache.lucene.store.ByteBuffersDataInput; import org.apache.lucene.store.ByteBuffersIndexInput; import org.apache.lucene.store.ChecksumIndexInput; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -103,7 +104,15 @@ public String description() { @Override public void notifyListener(OpenSearchException e, boolean sendShardFailure) { - listener.onFailure(state(), e, sendShardFailure); + // Cancellations are still passed to our SegmentReplicationListener as failures; if we failed because of cancellation, + // update the stage. + final Throwable cancelledException = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class); + if (cancelledException != null) { + state.setStage(SegmentReplicationState.Stage.CANCELLED); + listener.onFailure(state(), (CancellableThreads.ExecutionCancelledException) cancelledException, sendShardFailure); + } else { + listener.onFailure(state(), e, sendShardFailure); + } } @Override @@ -134,6 +143,14 @@ public void writeFileChunk( * @param listener {@link ActionListener} listener. */ public void startReplication(ActionListener listener) { + cancellableThreads.setOnCancel((reason, beforeCancelEx) -> { + // This method only executes when cancellation is triggered by this node and caught by a call to checkForCancel; + // SegmentReplicationSource does not share CancellableThreads. + final CancellableThreads.ExecutionCancelledException executionCancelledException = + new CancellableThreads.ExecutionCancelledException("replication was canceled reason [" + reason + "]"); + notifyListener(executionCancelledException, false); + throw executionCancelledException; + }); state.setStage(SegmentReplicationState.Stage.REPLICATING); final StepListener checkpointInfoListener = new StepListener<>(); final StepListener getFilesListener = new StepListener<>(); @@ -141,6 +158,7 @@ public void startReplication(ActionListener listener) { logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. 
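+ // a checkForCancel() call precedes each async step below so that a concurrent shard close or promotion aborts promptly 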
+ cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); @@ -154,6 +172,7 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); @@ -188,12 +207,14 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { + cancellableThreads.checkForCancel(); + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); multiFileWriter.renameAllTempFiles(); final Store store = store(); store.incRef(); @@ -261,4 +282,10 @@ Store.MetadataSnapshot getMetadataSnapshot() throws IOException { } return store.getMetadata(indexShard.getSegmentInfosSnapshot().get()); } + + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); + source.cancel(); + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index a79ce195ad83b..9e6b66dc4d7d6 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,10 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -64,6 +67,11 @@ public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Setti public synchronized void onNewCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { // noOp; } + + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + // noOp; + } }; // Used only for empty implementation. @@ -74,6 +82,10 @@ private SegmentReplicationTargetService() { sourceFactory = null; } + public ReplicationRef get(long replicationId) { + return onGoingReplications.get(replicationId); + } + /** * The internal actions * @@ -102,6 +114,9 @@ public SegmentReplicationTargetService( ); } + /** + * Cancel any replications on this node for a replica that is about to be closed. + */ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { @@ -109,11 +124,22 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } + /** + * Cancel any replications on this node for a replica that has just been promoted as the new primary. 
+ */ + @Override + public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { + if (oldRouting != null && oldRouting.primary() == false && newRouting.primary()) { + onGoingReplications.cancelForShard(indexShard.shardId(), "shard has been promoted to primary"); + } + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. - * @param receivedCheckpoint received checkpoint that is checked for processing - * @param replicaShard replica shard on which checkpoint is received + * + * @param receivedCheckpoint received checkpoint that is checked for processing + * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); @@ -180,12 +206,19 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept } } - public void startReplication( + public SegmentReplicationTarget startReplication( final ReplicationCheckpoint checkpoint, final IndexShard indexShard, final SegmentReplicationListener listener ) { - startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + sourceFactory.get(indexShard), + listener + ); + startReplication(target); + return target; } // pkg-private for integration tests @@ -248,7 +281,17 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { - onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof CancellableThreads.ExecutionCancelledException) { + if (onGoingReplications.getTarget(replicationId) != null) { + // if the target still exists in our collection, the primary initiated the cancellation, fail the replication + // but do not fail the shard. Cancellations initiated by this node from Index events will be removed with + // onGoingReplications.cancel and not appear in the collection when this listener resolves. 
+ onGoingReplications.fail(replicationId, (CancellableThreads.ExecutionCancelledException) cause, false); + } + } else { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } } }); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 3f4eadc52fd2a..92e9815313fa0 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -969,6 +969,7 @@ protected Node( .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); } else { b.bind(SegmentReplicationTargetService.class).toInstance(SegmentReplicationTargetService.NO_OP); + b.bind(SegmentReplicationSourceService.class).toInstance(SegmentReplicationSourceService.NO_OP); } } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); @@ -1112,6 +1113,9 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + injector.getInstance(SegmentReplicationSourceService.class).start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); @@ -1287,6 +1291,9 @@ public synchronized void close() throws IOException { // close filter/fielddata caches after indices toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); + if (FeatureFlags.isEnabled(REPLICATION_TYPE)) { + toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + } toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 23371a39871c7..88a3bdad53d0c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,11 +8,18 @@ package org.opensearch.index.shard; +import org.junit.Assert; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -21,12 +28,28 @@ import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; 
+import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static java.util.Arrays.asList; import static org.hamcrest.Matchers.equalTo; @@ -34,6 +57,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -241,6 +265,213 @@ public void testNRTReplicaPromotedAsPrimary() throws Exception { } } + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
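+ // acquiring a primary operation permit below succeeds only after the promotion has fully completed, so it doubles as a barrier 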
+ final CountDownLatch latch = new CountDownLatch(1); + nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); + nextPrimary.refresh("test"); + + oldPrimary.close("demoted", false); + oldPrimary.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + assertDocCount(nextPrimary, numDocs); + assertDocCount(newReplica, numDocs); + + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, shards.getReplicas()); + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // trigger a cancellation by closing the replica. 
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + 
sourceFactory + ); + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. @@ -253,4 +484,48 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun // processed cp should be 1 less than our searchable doc count. assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); } + + private void resolveCheckpointInfoResponseListener(ActionListener<CheckpointInfoResponse> listener, IndexShard primary) { + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId), primary); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } + } + + private void startReplicationAndAssertCancellation(IndexShard replica, SegmentReplicationTargetService targetService) + throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final SegmentReplicationTarget target = targetService.startReplication( + ReplicationCheckpoint.empty(replica.shardId), + replica, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail("Replication should not complete"); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertTrue(e instanceof CancellableThreads.ExecutionCancelledException); + assertFalse(sendShardFailure); + assertEquals(SegmentReplicationState.Stage.CANCELLED, state.getStage()); + latch.countDown(); + } + } + ); + + latch.await(2, TimeUnit.SECONDS); + assertEquals("Should have resolved listener with failure", 0, latch.getCount()); + assertNull(targetService.get(target.getId())); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 1f2360abde2ad..22481b5a7b99f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; @@ -572,6 +573,7 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, SegmentReplicationCheckpointPublisher.EMPTY, SegmentReplicationTargetService.NO_OP, + SegmentReplicationSourceService.NO_OP, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 38c55620e1223..f49ee0471b5e8 --- 
a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -14,6 +14,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -31,6 +33,8 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -154,6 +158,51 @@ public void testCancelReplication() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); } + public void testCancelReplication_AfterSendFilesStarts() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + // add a doc and refresh so primary has more than one segment. + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + primaryDiscoveryNode, + testCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + // cancel the replication as soon as the writer starts sending files. 
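+ // cancelling here should release the cached CopyState ref and fail the copy with an ExecutionCancelledException, which the onFailure branch below asserts.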
+ replications.cancel(replica.routingEntry().allocationId().getId(), "Test"); + }; + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertEquals(1, replications.size()); + assertEquals(1, replications.cachedCopyStateSize()); + getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + new ArrayList<>(copyState.getMetadataSnapshot().asMap().values()), + testCheckpoint + ); + replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected onFailure to be invoked."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } + public void testMultipleReplicasUseSameCheckpoint() throws IOException { IndexShard secondReplica = newShard(primary.shardId(), false); recoverReplica(secondReplica, primary, true); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 6bce74be569c3..323445bee1274 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -9,12 +9,14 @@ package org.opensearch.indices.replication; import org.apache.lucene.util.Version; +import org.junit.Assert; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -28,6 +30,8 @@ import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -126,6 +130,39 @@ public void testGetSegmentFiles() { assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); } + public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("onFailure response expected."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(e.getClass(), 
CancellableThreads.ExecutionCancelledException.class); + latch.countDown(); + } + } + ); + replicationSource.cancel(); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved in a failure", 0, latch.getCount()); + } + private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 2c52772649acc..a6e169dbc3d61 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -28,6 +29,8 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -197,4 +200,47 @@ public void testReplicationAlreadyRunning() throws IOException { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); Assert.assertThrows(OpenSearchException.class, () -> { handler.sendFiles(getSegmentFilesRequest, mock(ActionListener.class)); }); } + + public void testCancelReplication() throws IOException, InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + chunkWriter = mock(FileChunkWriter.class); + + final ReplicationCheckpoint latestReplicationCheckpoint = primary.getLatestReplicationCheckpoint(); + final CopyState copyState = new CopyState(latestReplicationCheckpoint, primary); + SegmentReplicationSourceHandler handler = new SegmentReplicationSourceHandler( + localNode, + chunkWriter, + threadPool, + copyState, + primary.routingEntry().allocationId().getId(), + 5000, + 1 + ); + + final GetSegmentFilesRequest getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + Collections.emptyList(), + latestReplicationCheckpoint + ); + + // cancel before xfer starts. Cancels during copy will be tested in SegmentFileTransferHandlerTests, that uses the same + // cancellableThreads. 
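+ // a cancel issued before any transfer starts should fail sendFiles immediately, without ever invoking the mocked chunk writer.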
+ handler.cancel("test"); + handler.sendFiles(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + Assert.fail("Expected failure."); + } + + @Override + public void onFailure(Exception e) { + assertEquals(CancellableThreads.ExecutionCancelledException.class, e.getClass()); + latch.countDown(); + } + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals("listener should have resolved with failure", 0, latch.getCount()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index de739f4ca834a..7d9b0f09f21cd 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,21 +9,22 @@ package org.opensearch.indices.replication; import org.junit.Assert; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; -import org.opensearch.transport.TransportService; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -35,12 +36,12 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.eq; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { - private IndexShard indexShard; + private IndexShard replicaShard; + private IndexShard primaryShard; private ReplicationCheckpoint checkpoint; private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; @@ -52,20 +53,20 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { public void setUp() throws Exception { super.setUp(); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); - final TransportService transportService = mock(TransportService.class); - indexShard = newStartedShard(false, settings); - checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + primaryShard = newStartedShard(true); + replicaShard = newShard(false, settings, new 
NRTReplicationEngineFactory()); + recoverReplica(replicaShard, primaryShard, true); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, 0L); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); - when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); - sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); - initialCheckpoint = indexShard.getLatestReplicationCheckpoint(); + sut = prepareForReplication(primaryShard); + initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -77,44 +78,58 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } - public void testTargetReturnsSuccess_listenerCompletes() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - } + public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + sut.startReplication(checkpoint, replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - Assert.fail(); - } + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); } - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - // set up stage correctly so the transition in markAsDone succeeds on listener completion - moveTargetToFinalStage(target); - final ActionListener listener = invocation.getArgument(0); - listener.onResponse(null); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + }); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } - public void testTargetThrowsException() { + public void testReplicationFails() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); final OpenSearchException expectedError = new OpenSearchException("Fail"); + SegmentReplicationSource source = new SegmentReplicationSource() { + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(expectedError); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + Assert.fail("Should not be called"); + } + }; final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - 
indexShard, - replicationSource, + replicaShard, + source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { @@ -123,24 +138,21 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + // failures leave state object in last entered stage. + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); assertEquals(expectedError, e.getCause()); - assertTrue(sendShardFailure); + latch.countDown(); } } ); - final SegmentReplicationTarget spy = Mockito.spy(target); - doAnswer(invocation -> { - final ActionListener listener = invocation.getArgument(0); - listener.onFailure(expectedError); - return null; - }).when(spy).startReplication(any()); - sut.startReplication(spy); + sut.startReplication(target); + latch.await(2, TimeUnit.SECONDS); + assertEquals(0, latch.getCount()); } public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(indexShard.getLatestReplicationCheckpoint(), indexShard); + spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -149,7 +161,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { SegmentReplicationTargetService serviceSpy = spy(sut); final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, - indexShard, + replicaShard, replicationSource, mock(SegmentReplicationTargetService.SegmentReplicationListener.class) ); @@ -161,7 +173,7 @@ public void testShardAlreadyReplicating() throws InterruptedException { doAnswer(invocation -> { final ActionListener listener = invocation.getArgument(0); // a new checkpoint arrives before we've completed. - serviceSpy.onNewCheckpoint(aheadCheckpoint, indexShard); + serviceSpy.onNewCheckpoint(aheadCheckpoint, replicaShard); listener.onResponse(null); latch.countDown(); return null; @@ -173,12 +185,12 @@ public void testShardAlreadyReplicating() throws InterruptedException { // wait for the new checkpoint to arrive, before the listener completes. 
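// the service must not start a second replication for the ahead checkpoint while one is already in flight, which the verify below confirms.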
latch.await(30, TimeUnit.SECONDS); - verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(indexShard), any()); + verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(replicaShard), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { SegmentReplicationTargetService spy = spy(sut); - spy.onNewCheckpoint(checkpoint, indexShard); + spy.onNewCheckpoint(checkpoint, replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); } @@ -190,22 +202,6 @@ public void testShardNotStarted() throws IOException { closeShards(shard); } - public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOException { - allowShardFailures(); - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(aheadCheckpoint, spyShard); - verify(spy, times(1)).startReplication(any(), any(), captor.capture()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onFailure(new SegmentReplicationState(new ReplicationLuceneIndex()), new OpenSearchException("testing"), true); - verify(spyShard).failShard(any(), any()); - closeShard(indexShard, false); - } - /** * here we are starting a new shard in PrimaryMode and testing that we don't process a checkpoint on shard when it is in PrimaryMode. */ @@ -215,70 +211,10 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException { // Starting a new shard in PrimaryMode. IndexShard primaryShard = newStartedShard(true); IndexShard spyShard = spy(primaryShard); - doNothing().when(spy).startReplication(any(), any(), any()); spy.onNewCheckpoint(aheadCheckpoint, spyShard); // Verify that checkpoint is not processed as shard is in PrimaryMode. 
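// a shard in PrimaryMode publishes checkpoints rather than consuming them, so startReplication must never fire here.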
verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } - - public void testReplicationOnDone() throws IOException { - SegmentReplicationTargetService spy = spy(sut); - IndexShard spyShard = spy(indexShard); - ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); - ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 1 - ); - ReplicationCheckpoint anotherNewCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 2 - ); - ArgumentCaptor captor = ArgumentCaptor.forClass( - SegmentReplicationTargetService.SegmentReplicationListener.class - ); - doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(newCheckpoint, spyShard); - spy.onNewCheckpoint(anotherNewCheckpoint, spyShard); - verify(spy, times(1)).startReplication(eq(newCheckpoint), any(), captor.capture()); - verify(spy, times(1)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); - listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); - doNothing().when(spy).onNewCheckpoint(any(), any()); - verify(spy, timeout(100).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); - closeShard(indexShard, false); - } - - public void testBeforeIndexShardClosed_CancelsOngoingReplications() { - final SegmentReplicationTarget target = new SegmentReplicationTarget( - checkpoint, - indexShard, - replicationSource, - mock(SegmentReplicationTargetService.SegmentReplicationListener.class) - ); - final SegmentReplicationTarget spy = Mockito.spy(target); - sut.startReplication(spy); - sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); - verify(spy, times(1)).cancel(any()); - } - - /** - * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order - * until the final, non-terminal stage. 
- */ - private void moveTargetToFinalStage(SegmentReplicationTarget target) { - SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values(); - assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT); - // Skip the first two stages (DONE and INIT) and iterate until the last value - for (int i = 2; i < stageValues.length; i++) { - target.state().setStage(stageValues[i]); - } - } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4d3b841e203de..ff4005d9bcedf 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -185,6 +185,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; @@ -1857,6 +1858,7 @@ public void onFailure(final Exception e) { transportService, new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService) ), + SegmentReplicationSourceService.NO_OP, shardStateAction, new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 08004b7e42fea..1b40cb4f2dfa3 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -68,7 +68,6 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -112,7 +111,10 @@ import org.opensearch.indices.replication.CheckpointInfoResponse; import org.opensearch.indices.replication.GetSegmentFilesResponse; import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; @@ -127,8 +129,10 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; @@ -146,7 +150,9 @@ import static org.hamcrest.Matchers.contains; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -1171,35 +1177,40 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } /** - * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. - * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that - * writes all segments directly to the target. + * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform replications that has + * been configured to return the given primaryShard's current segments. + * + * @param primaryShard {@link IndexShard} - The primary shard to replicate from. */ - public final void replicateSegments(IndexShard primaryShard, List<IndexShard> replicaShards) throws IOException, InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); - Store.MetadataSnapshot primaryMetadata; - try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); - } - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); - - final ReplicationCollection<SegmentReplicationTarget> replicationCollection = new ReplicationCollection<>(logger, threadPool); - final SegmentReplicationSource source = new SegmentReplicationSource() { + public final SegmentReplicationTargetService prepareForReplication(IndexShard primaryShard) { + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory + ); + final SegmentReplicationSource replicationSource = new SegmentReplicationSource() { @Override public void getCheckpointMetadata( long replicationId, ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - listener.onResponse( - new CheckpointInfoResponse( - copyState.getCheckpoint(), - copyState.getMetadataSnapshot(), - copyState.getInfosBytes(), - copyState.getPendingDeleteFiles() - ) - ); + try { + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } catch (IOException e) { + logger.error("Unexpected error computing CopyState", e); + Assert.fail("Failed to compute copyState"); + } } @Override @@ -1211,9 +1222,7 @@ public void getSegmentFiles( ActionListener<GetSegmentFilesResponse> listener ) { try ( - final ReplicationCollection.ReplicationRef<SegmentReplicationTarget> replicationRef = replicationCollection.get( - replicationId - ) + final ReplicationCollection.ReplicationRef<SegmentReplicationTarget> replicationRef = targetService.get(replicationId) ) { writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); } catch (IOException e) { @@ -1222,15 +1231,43 @@ public void 
getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; + when(sourceFactory.get(any())).thenReturn(replicationSource); + return targetService; + } + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. + * @param primaryShard - {@link IndexShard} The current primary shard. + * @param replicaShards - Replicas that will be updated. + * @return {@link List} List of target components orchestrating replication. + */ + public final List<SegmentReplicationTarget> replicateSegments(IndexShard primaryShard, List<IndexShard> replicaShards) + throws IOException, InterruptedException { + final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard); + return replicateSegments(targetService, primaryShard, replicaShards); + } + + public final List<SegmentReplicationTarget> replicateSegments( + SegmentReplicationTargetService targetService, + IndexShard primaryShard, + List<IndexShard> replicaShards + ) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Store.MetadataSnapshot primaryMetadata; + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + } + List<SegmentReplicationTarget> ids = new ArrayList<>(); for (IndexShard replica : replicaShards) { - final SegmentReplicationTarget target = new SegmentReplicationTarget( + final SegmentReplicationTarget target = targetService.startReplication( ReplicationCheckpoint.empty(replica.shardId), replica, - source, - new ReplicationListener() { + new SegmentReplicationTargetService.SegmentReplicationListener() { @Override - public void onDone(ReplicationState state) { + public void onReplicationDone(SegmentReplicationState state) { try (final GatedCloseable<SegmentInfos> snapshot = replica.getSegmentInfosSnapshot()) { final SegmentInfos replicaInfos = snapshot.get(); final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); @@ -1241,31 +1278,22 @@ public void onDone(ReplicationState state) { assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); + } finally { + countDownLatch.countDown(); } - countDownLatch.countDown(); } @Override - public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { logger.error("Unexpected replication failure in test", e); Assert.fail("test replication should not fail: " + e); } } ); - replicationCollection.start(target, TimeValue.timeValueMillis(5000)); - target.startReplication(new ActionListener<>() { - @Override - public void onResponse(Void o) { - replicationCollection.markAsDone(target.getId()); - } - - @Override - public void onFailure(Exception e) { - replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); - } - }); + ids.add(target); + countDownLatch.await(1, TimeUnit.SECONDS); } - countDownLatch.await(3, TimeUnit.SECONDS); + return ids; } private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws 
IOException { From 4a6e937b3ba120f38fa991c9c481a01daec18e94 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 31 Aug 2022 18:49:19 -0700 Subject: [PATCH 11/16] Bug fixes for dependabot changelog verifier (#4364) * Fix token usage for changelog helper Signed-off-by: Kunal Kotwani * Add conditional check for dependabot steps Signed-off-by: Kunal Kotwani * Add dependency section Signed-off-by: Kunal Kotwani * Bug fixes for dependabot changelog verifier Signed-off-by: Kunal Kotwani * Update the changelog Signed-off-by: Kunal Kotwani Signed-off-by: Kunal Kotwani --- .github/pull_request_template.md | 2 +- .github/workflows/changelog_verifier.yml | 12 ------------ .github/workflows/dependabot_pr.yml | 14 ++++++++++++++ .linelint.yml | 1 + CHANGELOG.md | 1 + 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c76e27d6dfc7d..4537cadf71074 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -10,7 +10,7 @@ - [ ] New functionality has been documented. - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff -- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../CONTRIBUTING.md#changelog)) +- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index ac0c0ec4d7297..cda5dde462068 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -13,16 +13,4 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - - uses: dangoslen/dependabot-changelog-helper@v1 - with: - version: 'Unreleased' - - - uses: stefanzweifel/git-auto-commit-action@v4 - with: - commit_message: "Update changelog" - branch: ${{ github.head_ref }} - commit_user_name: dependabot[bot] - commit_user_email: support@github.com - commit_options: '--signoff' - - uses: dangoslen/changelog-enforcer@v3 diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 2ac904bf4ccf7..ed98bae8978ed 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -47,3 +47,17 @@ jobs: commit_user_name: dependabot[bot] commit_user_email: support@github.com commit_options: '--signoff' + + - name: Update the changelog + uses: dangoslen/dependabot-changelog-helper@v1 + with: + version: 'Unreleased' + + - name: Commit the changes + uses: stefanzweifel/git-auto-commit-action@v4 + with: + commit_message: "Update changelog" + branch: ${{ github.head_ref }} + commit_user_name: dependabot[bot] + commit_user_email: support@github.com + commit_options: '--signoff' diff --git a/.linelint.yml b/.linelint.yml index 6240c8b3d7a96..ec947019f8ab6 100644 --- a/.linelint.yml +++ b/.linelint.yml @@ -7,6 +7,7 @@ ignore: - .idea/ - '*.sha1' - '*.txt' + - 'CHANGELOG.md' - '.github/CODEOWNERS' - 'buildSrc/src/testKit/opensearch.build/LICENSE' - 'buildSrc/src/testKit/opensearch.build/NOTICE' diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d07052d55ff0..07b08d853cf45 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225)) +- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) From 689a2c44eee85b025c95516c4949d3a1bc3ec284 Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:58:45 +0530 Subject: [PATCH 12/16] Add changes for Create PIT and Delete PIT rest layer and rest high level client (#4064) * Create and delete PIT search rest layer changes Signed-off-by: Bharathwaj G --- CHANGELOG.md | 1 + .../opensearch/client/RequestConverters.java | 38 ++++- .../client/RestHighLevelClient.java | 118 ++++++++++++++++ .../java/org/opensearch/client/PitIT.java | 102 ++++++++++++++ .../client/RequestConvertersTests.java | 44 ++++++ .../client/RestHighLevelClientTests.java | 1 + .../java/org/opensearch/client/SearchIT.java | 47 +++++++ .../rest-api-spec/api/create_pit.json | 44 ++++++ .../rest-api-spec/api/delete_all_pits.json | 19 +++ .../rest-api-spec/api/delete_pit.json | 23 +++ .../rest-api-spec/test/pit/10_basic.yml | 130 +++++++++++++++++ .../org/opensearch/action/ActionModule.java | 10 ++ .../action/search/DeletePitInfo.java | 4 +- .../action/search/DeletePitRequest.java | 5 + .../action/search/GetAllPitNodesRequest.java | 11 ++ .../action/search/GetAllPitNodesResponse.java | 8 ++ .../action/search/NodesGetAllPitsAction.java | 23 +++ .../opensearch/action/search/PitService.java | 23 ++- .../search/TransportDeletePitAction.java | 6 +- .../search/TransportGetAllPitsAction.java | 80 +++-------- .../TransportNodesGetAllPitsAction.java | 86 +++++++++++ .../action/search/RestCreatePitAction.java | 57 ++++++++ .../action/search/RestDeletePitAction.java | 60 ++++++++ .../org/opensearch/search/SearchService.java | 4 +- .../search/CreatePitControllerTests.java | 11 +- .../search/TransportDeletePitActionTests.java | 18 +-- .../search/CreatePitSingleNodeTests.java | 52 +++++++ .../search/pit/RestCreatePitActionTests.java | 78 ++++++++++ .../search/pit/RestDeletePitActionTests.java | 133 ++++++++++++++++++ 29 files changed, 1152 insertions(+), 84 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml create mode 100644 server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java create mode 100644 
server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 07b08d853cf45..f89e7eba0698c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] ### Added - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) +- Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 6fa57295f48e4..eedc27d1d2ea7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +94,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +436,19 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + /** + * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + * refer to org.opensearch.action.search.SearchResponseMerger + */ + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ -464,6 +477,27 
@@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits() { + return new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + } + static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 28a441bdf7f7f..0c73c65f6175f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,10 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -1250,6 +1254,120 @@ public final Cancellable scrollAsync( ); } + /** + * Create a PIT context using the create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously create a PIT context using the create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener<CreatePitResponse> listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete point in time searches using the delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete point in time searches using the delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener<DeletePitResponse> listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all point in time searches using the delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously delete all point in time searches using the delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public final Cancellable deleteAllPitsAsync(RequestOptions options, ActionListener<DeletePitResponse> listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..395ec6e46a7b3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreateAndDeletePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertEquals(1, pitResponse.getTotalShards()); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(0, pitResponse.getFailedShards()); + assertEquals(0, pitResponse.getSkippedShards()); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + + public void testDeleteAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + DeletePitResponse deletePitResponse = highLevelClient().deleteAllPits(RequestOptions.DEFAULT); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + 
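+ // each DeletePitInfo pairs a pit id with a success flag; both PITs created above should report success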
assertTrue(deletePitInfo.isSuccessful()); + } + pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + ActionListener<DeletePitResponse> deletePitListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("Delete all failed"); + } + } + }; + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + // validate no pits case + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + } +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 97c0f2f475826..ee5795deb165d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +133,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1306,47 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + String[] indices = randomIndicesNames(0, 5); + Map<String, String> expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeletePit() throws IOException { + List<String> pitIdsList = new ArrayList<>(); + pitIdsList.add("pitId1"); + pitIdsList.add("pitId2"); + DeletePitRequest deletePitRequest = new 
DeletePitRequest(pitIdsList); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + Request request = RequestConverters.deleteAllPits(); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 3da0f81023f72..cdd63743f2644 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -134,6 +134,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { // core "ping", "info", + "delete_all_pits", // security "security.get_ssl_certificates", "security.authenticate", diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..8b509e5d19e92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,10 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +93,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -100,11 +105,13 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +769,46 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = 
jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest searchRequest1 = new SearchRequest("index1"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..d3a2104c01bc0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,44 @@ + +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." 
+    },
+    "stability":"stable",
+    "url":{
+      "paths":[
+        {
+          "path":"/{index}/_search/point_in_time",
+          "methods":[
+            "POST"
+          ],
+          "parts":{
+            "index":{
+              "type":"list",
+              "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices"
+            }
+          }
+        }
+      ]
+    },
+    "params":{
+      "allow_partial_pit_creation":{
+        "type":"boolean",
+        "description":"Whether to allow the point in time to be created with partial failures"
+      },
+      "keep_alive":{
+        "type":"string",
+        "description":"Specify how long the point in time should be kept alive"
+      },
+      "preference":{
+        "type":"string",
+        "description":"Specify the node or shard the operation should be performed on (default: random)"
+      },
+      "routing":{
+        "type":"list",
+        "description":"A comma-separated list of specific routing values"
+      }
+    }
+  }
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json
new file mode 100644
index 0000000000000..5ff01aa746df9
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json
@@ -0,0 +1,19 @@
+{
+  "delete_all_pits":{
+    "documentation":{
+      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "description":"Deletes all active point in time searches."
+    },
+    "stability":"stable",
+    "url":{
+      "paths":[
+        {
+          "path":"/_search/point_in_time/_all",
+          "methods":[
+            "DELETE"
+          ]
+        }
+      ]
+    }
+  }
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json
new file mode 100644
index 0000000000000..b54d9f76204f4
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json
@@ -0,0 +1,23 @@
+{
+  "delete_pit":{
+    "documentation":{
+      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/",
+      "description":"Deletes one or more point in time searches based on the IDs passed."
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..2023bcc8f5c87 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 +1,130 @@ +"Create PIT, Search with PIT ID and Delete": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + +--- +"Delete all": + - skip: + version: " - 2.9.99" + reason: "mode to be introduced later than 3.0" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + + - do: + delete_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + + - do: + catch: missing + delete_all_pits: { } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 797c5c38fada6..74be544123d9f 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -240,12 +240,14 @@ import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.NodesGetAllPitsAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportGetAllPitsAction; +import org.opensearch.action.search.TransportNodesGetAllPitsAction; import 
org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -408,6 +410,8 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -674,6 +678,7 @@ public void reg actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class); + actions.register(NodesGetAllPitsAction.INSTANCE, TransportNodesGetAllPitsAction.class); // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); @@ -849,6 +854,11 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time API + registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 943199812771a..5a167c5a6f160 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -65,11 +65,11 @@ public void writeTo(StreamOutput out) throws IOException { static { PARSER.declareBoolean(constructorArg(), new ParseField("successful")); - PARSER.declareString(constructorArg(), new ParseField("pitId")); + PARSER.declareString(constructorArg(), new ParseField("pit_id")); } private static final ParseField SUCCESSFUL = new ParseField("successful"); - private static final ParseField PIT_ID = new ParseField("pitId"); + private static final ParseField PIT_ID = new ParseField("pit_id"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 945fcfd17eb6c..926e9c19a33f5 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -48,6 +48,11 @@ public DeletePitRequest(List pitIds) { this.pitIds.addAll(pitIds); } + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + public DeletePitRequest() {} public List getPitIds() { diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index b4ad2f6641087..340f9b842adbf 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ 
b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -21,11 +21,22 @@ */ public class GetAllPitNodesRequest extends BaseNodesRequest { + // Security plugin intercepts and sets the response with permitted PIT contexts + private GetAllPitNodesResponse getAllPitNodesResponse; + @Inject public GetAllPitNodesRequest(DiscoveryNode... concreteNodes) { super(concreteNodes); } + public void setGetAllPitNodesResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + this.getAllPitNodesResponse = getAllPitNodesResponse; + } + + public GetAllPitNodesResponse getGetAllPitNodesResponse() { + return getAllPitNodesResponse; + } + public GetAllPitNodesRequest(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 4a454e7145eff..091447798cf5f 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -52,6 +52,14 @@ public GetAllPitNodesResponse( ); } + /** + * Copy constructor that explicitly sets the list pit infos + */ + public GetAllPitNodesResponse(List listPitInfos, GetAllPitNodesResponse response) { + super(response.getClusterName(), response.getNodes(), response.failures()); + pitInfos.addAll(listPitInfos); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java new file mode 100644 index 0000000000000..af41f7d49551c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/NodesGetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for retrieving all PIT reader contexts from nodes + */ +public class NodesGetAllPitsAction extends ActionType { + public static final NodesGetAllPitsAction INSTANCE = new NodesGetAllPitsAction(); + public static final String NAME = "cluster:admin/point_in_time/read_from_nodes"; + + private NodesGetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 0b79b77fd6014..ff068397ad94e 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -15,6 +15,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; @@ -47,12 +48,19 @@ public class PitService { private final ClusterService clusterService; private final SearchTransportService searchTransportService; private final TransportService transportService; + private final NodeClient nodeClient; @Inject - public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) { + public PitService( + ClusterService clusterService, + SearchTransportService searchTransportService, + TransportService transportService, + NodeClient nodeClient + ) { this.clusterService = clusterService; this.searchTransportService = searchTransportService; this.transportService = transportService; + this.nodeClient = nodeClient; } /** @@ -144,6 +152,17 @@ public void onFailure(final Exception e) { }, size); } + /** + * This method returns indices associated for each pit + */ + public Map getIndicesForPits(List pitIds) { + Map pitToIndicesMap = new HashMap<>(); + for (String pitId : pitIds) { + pitToIndicesMap.put(pitId, SearchContextId.decode(nodeClient.getNamedWriteableRegistry(), pitId).getActualIndices()); + } + return pitToIndicesMap; + } + /** * Get all active point in time contexts */ @@ -156,7 +175,7 @@ public void getAllPits(ActionListener getAllPitsListener DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); transportService.sendRequest( transportService.getLocalNode(), - GetAllPitsAction.NAME, + NodesGetAllPitsAction.NAME, new GetAllPitNodesRequest(disNodesArr), new TransportResponseHandler() { diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index f9e36c479dd54..19abe2361290d 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -57,7 +57,11 @@ public TransportDeletePitAction( @Override protected void doExecute(Task task, DeletePitRequest request, ActionListener listener) { List pitIds = request.getPitIds(); - if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + // when security plugin intercepts the request, if PITs are not present in the cluster the PIT IDs in request will be empty + // and in this case return empty response + if (pitIds.isEmpty()) { + 
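+            // nothing to free, so answer immediately without fanning out to the data nodes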
listener.onResponse(new DeletePitResponse(new ArrayList<>())); + } else if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { deleteAllPits(listener); } else { deletePits(listener, request); diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java index 21a64e388fa7b..c8529c5b02bd4 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -8,79 +8,31 @@ package org.opensearch.action.search; -import org.opensearch.action.FailedNodeException; +import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.TransportNodesAction; -import org.opensearch.cluster.service.ClusterService; +import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.SearchService; -import org.opensearch.threadpool.ThreadPool; +import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; -import java.io.IOException; -import java.util.List; - /** - * Transport action to get all active PIT contexts across all nodes + * Transport action to get all active PIT contexts across the cluster */ -public class TransportGetAllPitsAction extends TransportNodesAction< - GetAllPitNodesRequest, - GetAllPitNodesResponse, - GetAllPitNodeRequest, - GetAllPitNodeResponse> { - private final SearchService searchService; +public class TransportGetAllPitsAction extends HandledTransportAction { + private final PitService pitService; @Inject - public TransportGetAllPitsAction( - ThreadPool threadPool, - ClusterService clusterService, - TransportService transportService, - ActionFilters actionFilters, - SearchService searchService - ) { - super( - GetAllPitsAction.NAME, - threadPool, - clusterService, - transportService, - actionFilters, - GetAllPitNodesRequest::new, - GetAllPitNodeRequest::new, - ThreadPool.Names.SAME, - GetAllPitNodeResponse.class - ); - this.searchService = searchService; - } - - @Override - protected GetAllPitNodesResponse newResponse( - GetAllPitNodesRequest request, - List getAllPitNodeRespons, - List failures - ) { - return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); - } - - @Override - protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { - return new GetAllPitNodeRequest(); - } - - @Override - protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { - return new GetAllPitNodeResponse(in); + public TransportGetAllPitsAction(ActionFilters actionFilters, TransportService transportService, PitService pitService) { + super(GetAllPitsAction.NAME, transportService, actionFilters, in -> new GetAllPitNodesRequest(in)); + this.pitService = pitService; } - /** - * This retrieves all active PITs in the node - */ - @Override - protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { - GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( - transportService.getLocalNode(), - searchService.getAllPITReaderContexts() - ); - return nodeResponse; + protected void doExecute(Task task, GetAllPitNodesRequest request, ActionListener listener) { + // If security plugin intercepts the request, it'll replace all PIT IDs with permitted PIT IDs + 
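+        // a response pre-populated by an interceptor is returned as-is; otherwise all active PITs are collected from the nodes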
if (request.getGetAllPitNodesResponse() != null) { + listener.onResponse(request.getGetAllPitNodesResponse()); + } else { + pitService.getAllPits(listener); + } } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java new file mode 100644 index 0000000000000..520830cd293f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportNodesGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportNodesGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportNodesGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + NodesGetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeRespons, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.rest.action.search;
+
+import org.opensearch.action.search.CreatePitRequest;
+import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.Strings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.rest.BaseRestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.action.RestStatusToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.unmodifiableList;
+import static org.opensearch.rest.RestRequest.Method.POST;
+
+/**
+ * Rest action for creating PIT context
+ */
+public class RestCreatePitAction extends BaseRestHandler {
+    public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation";
+    public static String KEEP_ALIVE = "keep_alive";
+
+    @Override
+    public String getName() {
+        return "create_pit_action";
+    }
+
+    @Override
+    public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true);
+        String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+        TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null);
+        CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices);
+        createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions()));
+        createPitRequest.setPreference(request.param("preference"));
+        createPitRequest.setRouting(request.param("routing"));
+
+        return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel));
+    }
+
+    @Override
+    public List<Route> routes() {
+        return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time")));
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java
new file mode 100644
index 0000000000000..452e66f8f5018
--- /dev/null
+++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java
@@ -0,0 +1,60 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + final DeletePitRequest deletePITRequest; + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest = new DeletePitRequest(asList(allPitIdsQualifier)); + } else { + deletePITRequest = new DeletePitRequest(); + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 4bd95da193668..04fab85c163a9 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -881,6 +881,7 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL shard.awaitShardSearchActive(ignored -> { Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; + Releasable decreasePitContexts = openPitContexts::decrementAndGet; try { if (openPitContexts.incrementAndGet() > maxOpenPitContext) { throw new OpenSearchRejectedExecutionException( @@ -902,15 +903,16 @@ public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionL searchOperationListener.onNewPitContext(finalReaderContext); readerContext.addOnClose(() -> { - openPitContexts.decrementAndGet(); searchOperationListener.onFreeReaderContext(finalReaderContext); searchOperationListener.onFreePitContext(finalReaderContext); }); + readerContext.addOnClose(decreasePitContexts); // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); } catch (Exception exc) { + Releasables.closeWhileHandlingException(decreasePitContexts); Releasables.closeWhileHandlingException(searcherSupplier, readerContext); listener.onFailure(exc); } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index a5c6e1c12b79c..c03c27f7d7e4d 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -14,6 +14,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -70,6 +71,8 @@ public class CreatePitControllerTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + Settings settings = Settings.builder().put("node.name", CreatePitControllerTests.class.getSimpleName()).build(); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -219,7 +222,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +311,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +409,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +497,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 7a1d9a6fe963c..bdc0440a89f69 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -13,6 +13,7 @@ import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import 
org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; @@ -62,6 +63,7 @@ public class TransportDeletePitActionTests extends OpenSearchTestCase { ClusterService clusterServiceMock = null; Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); private ThreadPool threadPool = new ThreadPool(settings); + NodeClient client = new NodeClient(settings, threadPool); @Override public void tearDown() throws Exception { @@ -165,7 +167,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -229,7 +231,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -312,7 +314,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -371,7 +373,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -439,7 +441,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -505,7 +507,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new 
ListPitInfo(getPitId(), 0, 0); @@ -581,7 +583,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); @@ -661,7 +663,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { @Override public void getAllPits(ActionListener getAllPitsListener) { ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index a10f004b2ee97..9a28f1800847e 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -14,6 +14,10 @@ import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -33,6 +37,8 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -282,6 +288,52 @@ public void testMaxOpenPitContexts() throws Exception { validatePitStats("index", 0, maxPitContexts, 0); } + public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + try { + for (int i = 0; i < 1000; i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + } catch (Exception ex) { + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." 
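+                        // message emitted once the search.max_open_pit_context limit is exceeded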
+ ) + ); + } + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + // deleteall + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, maxPitContexts, 0); + client().execute(CreatePitAction.INSTANCE, request).get(); + validatePitStats("index", 1, maxPitContexts, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts + 1, 0); + } + public void testOpenPitContextsConcurrently() throws Exception { createIndex("index"); final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void 
testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..0bfa16aafe1e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + 
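+            // the channel is a no-op sink; the deletePits() override above performs the actual assertions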
FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } + } + + public void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +} From bd11c6958a78f57ce4de33ef5d004044bf838622 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Sep 2022 08:24:08 -0700 Subject: [PATCH 13/16] Bump com.diffplug.spotless from 6.9.1 to 6.10.0 (#4319) --- CHANGELOG.md | 4 +++- build.gradle | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f89e7eba0698c..19b5c8e85cfeb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) - Label configuration for dependabot 
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
+### Dependencies
+- Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
 
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
@@ -53,4 +55,4 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
-[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
+[2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x
\ No newline at end of file
diff --git a/build.gradle b/build.gradle
index ce5ea6cdd7e11..a1f4f2d04883a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -55,7 +55,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "6.9.1" apply false
+  id "com.diffplug.spotless" version "6.10.0" apply false
   id "org.gradle.test-retry" version "1.4.0" apply false
   id "test-report-aggregation"
   id 'jacoco-report-aggregation'

From 236f2f611051bd50aaa9fcb2ccbaa4bd6211fea0 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 1 Sep 2022 11:28:35 -0400
Subject: [PATCH 14/16] Update to Netty 4.1.80.Final (#4359)

Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 1 +
 buildSrc/version.properties | 2 +-
 modules/transport-netty4/build.gradle | 8 ++++++++
 .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 +
 ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 -
 ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-dns-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-dns-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http2-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-socks-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-socks-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 | 1 +
 ...tty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 -
 ...tty-transport-native-unix-common-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-all-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-all-4.1.80.Final.jar.sha1 | 1 +
 plugins/transport-nio/build.gradle | 6 ++++++
 .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-buffer-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-common-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-handler-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-resolver-4.1.80.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 -
 .../licenses/netty-transport-4.1.80.Final.jar.sha1 | 1 +
 50 files changed, 39 insertions(+), 24 deletions(-)
 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 19b5c8e85cfeb..6434041b038ee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
 - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240))
 - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156))
+- Update to Netty 4.1.80.Final ([#4359](https://github.com/opensearch-project/OpenSearch/pull/4359))
 
 ### Deprecated
 
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 072dcc4578977..6cc24a3f09244 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -21,7 +21,7 @@ asm = 9.3
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 5.5.0
 
-netty = 4.1.79.Final
+netty = 4.1.80.Final
 joda = 2.10.13
 
 # client dependencies
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index 5d2047d7f18a2..8bbe0bf2ef65f 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -144,6 +144,14 @@ thirdPartyAudit {
     'org.apache.log4j.Level',
     'org.apache.log4j.Logger',
 
+    // from io.netty.handler.ssl.OpenSslEngine (netty)
+    'org.bouncycastle.openssl.PEMEncryptedKeyPair',
+    'org.bouncycastle.openssl.PEMParser',
+    'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter',
+    'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder',
+    'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder',
+    'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo',
+
     // from io.netty.handler.ssl.OpenSslEngine (netty)
     'io.netty.internal.tcnative.Buffer',
     'io.netty.internal.tcnative.CertificateCompressionAlgo',
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e9e4d0b7f754..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c014412b599489b1db27c6bc08d8a46da94e397
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..471fe8b211df2
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+a087321a63d9991e25f7b7d24ef53edcbcb954ff
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1
deleted file mode 100644
index c0920231d79a8..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..0f8e3bebe1532
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+4941821a158d16311665d8606aefa610ecf0f64c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1
deleted file mode 100644
index a3f650da5abbd..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-882c70bc0a30a98bf3ce477f043e967ac026044c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d18720d164335
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+efb23f9d5187d2f733595ef7930137f0cb2cec48
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
deleted file mode 100644
index f2989024cfce1..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0eeffab0cd5efb699d5e4ab9b694d32fef6694b3
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d96a286b98493
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+bf7b66834188ef1a6f6095291c6b81a1880798ba
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index faa7b099406a3..0000000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2814bd465731355323aba0fdd22163bfce638a75
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d256e77b7024c
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3d43ce22863bc590e4e33fbdabbb58dc05f4c43d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e314f164da69..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2dc22423c8ed19906615fb936a5fcb7db14a4e6c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..022ad6bc93dba
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+cf7029d2f9bc4eeae8ff15af7a528d06b518a017
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1
deleted file mode 100644
index af550935bb911..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-55ecb1ff4464b56564a90824a741c3911264aaa4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ad0f71b569377
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1
deleted file mode 100644
index c6e18efb3ad3d..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6cc2b49749b4fbcc39c687027e04e65e857552a9
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2bfb4f377d89b
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+57fcace7a1b8567aa39921c915d1b1ba78fd4d2d
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index 7f984663dfa85..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-731937caec938b77b39df932a8da8aaca8d5ec05
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..998e6e8560724
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+da3d7da1a8d317ae2c82b400fd255fe610c43ebe
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1
deleted file mode 100644
index a1753b194ea31..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c19c46f9529791964f636c93cfaca0556f0d5d0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2dab7f40b02b7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+6926d2ea779f41071ecb1948d880dfbb3a6ee126
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
deleted file mode 100644
index f2989024cfce1..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0eeffab0cd5efb699d5e4ab9b694d32fef6694b3
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d96a286b98493
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+bf7b66834188ef1a6f6095291c6b81a1880798ba
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1
deleted file mode 100644
index 913f0e7685c86..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-794a5937cdb1871c4ae350610752dec2929dc1d6
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..625344e6cfb0a
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+00025b767be3425f3b31a34ee095c85619169f17
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1
deleted file mode 100644
index dbb072f3f665f..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-054aace8683de7893cf28d4aab72cd60f49b5700
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..c3184ec5ff7d3
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+9b3b42ff805723fb98120f5ab2019c53e71da91b
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1
deleted file mode 100644
index a5d1be00d9c29..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8eb9be9b6a66a03f5f4df67fe559cb676493d167
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..bb6a3502a729f
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+6b1602f80b6235b0b7d53bc5e9c1a6cd11c1b804
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index 7f984663dfa85..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-731937caec938b77b39df932a8da8aaca8d5ec05
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..998e6e8560724
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+da3d7da1a8d317ae2c82b400fd255fe610c43ebe
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1
deleted file mode 100644
index 724950db96f09..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1c53cffaa14d61de523b167377843e35807292a7
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ae6eb1d85f1ea
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+39e73b76a3ec65df731b371179e15f2c3e4e7575
\ No newline at end of file
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle
index a7e8c42a4e2d3..c5b401de60c8c 100644
--- a/plugins/transport-nio/build.gradle
+++ b/plugins/transport-nio/build.gradle
@@ -83,6 +83,12 @@ thirdPartyAudit {
     'org.bouncycastle.cert.X509v3CertificateBuilder',
     'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
     'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',
+    'org.bouncycastle.openssl.PEMEncryptedKeyPair',
+    'org.bouncycastle.openssl.PEMParser',
+    'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter',
+    'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder',
+    'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder',
+    'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo',
 
     // from io.netty.handler.ssl.JettyNpnSslEngine (netty)
     'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e9e4d0b7f754..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6c014412b599489b1db27c6bc08d8a46da94e397
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..471fe8b211df2
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+a087321a63d9991e25f7b7d24ef53edcbcb954ff
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1
deleted file mode 100644
index c0920231d79a8..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..0f8e3bebe1532
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+4941821a158d16311665d8606aefa610ecf0f64c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1
deleted file mode 100644
index a3f650da5abbd..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-882c70bc0a30a98bf3ce477f043e967ac026044c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d18720d164335
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+efb23f9d5187d2f733595ef7930137f0cb2cec48
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1
deleted file mode 100644
index faa7b099406a3..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2814bd465731355323aba0fdd22163bfce638a75
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..d256e77b7024c
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3d43ce22863bc590e4e33fbdabbb58dc05f4c43d
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1
deleted file mode 100644
index 8e314f164da69..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2dc22423c8ed19906615fb936a5fcb7db14a4e6c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..022ad6bc93dba
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+cf7029d2f9bc4eeae8ff15af7a528d06b518a017
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1
deleted file mode 100644
index af550935bb911..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-55ecb1ff4464b56564a90824a741c3911264aaa4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..ad0f71b569377
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+3bbb0d4bfbbab867e5b757b97a6e5e0d1348d94c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
deleted file mode 100644
index c6e18efb3ad3d..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6cc2b49749b4fbcc39c687027e04e65e857552a9
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1
new file mode 100644
index 0000000000000..2bfb4f377d89b
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.80.Final.jar.sha1
@@ -0,0 +1 @@
+57fcace7a1b8567aa39921c915d1b1ba78fd4d2d
\ No newline at end of file

From 5c3cc935c79b0b1eb48826a3182805d6c51bafea Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 1 Sep 2022 09:15:10 -0700
Subject: [PATCH 15/16] Bump xmlbeans from 5.1.0 to 5.1.1 in /plugins/ingest-attachment (#4354)

* Bump xmlbeans from 5.1.0 to 5.1.1 in /plugins/ingest-attachment

Bumps xmlbeans from 5.1.0 to 5.1.1.

---
updated-dependencies:
- dependency-name: org.apache.xmlbeans:xmlbeans
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md | 1 +
 plugins/ingest-attachment/build.gradle | 2 +-
 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 | 1 -
 plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
 create mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6434041b038ee..93aef24a46674 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847))
 ### Dependencies
 - Bumps `com.diffplug.spotless` from 6.9.1 to 6.10.0
+- Bumps `xmlbeans` from 5.1.0 to 5.1.1
 
 ### Changed
 - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308))
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index 86694b9bc9da7..af9485c991f0c 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -79,7 +79,7 @@ dependencies {
   api "org.apache.poi:poi:${versions.poi}"
   api "org.apache.poi:poi-ooxml-lite:${versions.poi}"
   api "commons-codec:commons-codec:${versions.commonscodec}"
-  api 'org.apache.xmlbeans:xmlbeans:5.1.0'
+  api 'org.apache.xmlbeans:xmlbeans:5.1.1'
   api 'org.apache.commons:commons-collections4:4.4'
   // MS Office
   api "org.apache.poi:poi-scratchpad:${versions.poi}"
diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
deleted file mode 100644
index 85f757b61048c..0000000000000
--- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3534ab896663e6f6d8a2cf46882d7407641d7a31
\ No newline at end of file
diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1
new file mode 100644
index 0000000000000..4d1d2ad0807e7
--- /dev/null
+++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1
@@ -0,0 +1 @@
+48a369df0eccb509d46203104e4df9cb00f0f68b
\ No newline at end of file

From 715da849e0bf910c84a7c7615ab2faccef95b4a7 Mon Sep 17 00:00:00 2001
From: Rabi Panda
Date: Thu, 1 Sep 2022 10:06:19 -0700
Subject: [PATCH 16/16] Fix randomized test failure NRTReplicationEngineTests.testUpdateSegments (#4352)

Overload `generateHistoryOnReplica` so that it can generate operations of only a specific `Engine.Operation.TYPE`, as required by the `testUpdateSegments` test.

Signed-off-by: Rabi Panda
Signed-off-by: Rabi Panda
---
 CHANGELOG.md | 3 ++-
 .../index/engine/NRTReplicationEngineTests.java | 12 ++++++++----
 .../opensearch/index/engine/EngineTestCase.java | 17 ++++++++++++++++-
 3 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93aef24a46674..376c8f37c8063 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,7 +33,8 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 - Add timeout on Mockito.verify to reduce flakyness in testReplicationOnDone test([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314))
 - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331))
 - Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
-- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
+- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364))
+- Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352))
 
 ### Security
 - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341))
diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
index 1fe1a37dedae0..0008afcc901c7 100644
--- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java
@@ -112,10 +112,14 @@ public void testUpdateSegments() throws Exception {
             final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore)
         ) {
             // add docs to the primary engine.
-            List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean())
-                .stream()
-                .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX))
-                .collect(Collectors.toList());
+            List<Engine.Operation> operations = generateHistoryOnReplica(
+                between(1, 500),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean(),
+                Engine.Operation.TYPE.INDEX
+            );
+
             for (Engine.Operation op : operations) {
                 applyOperation(engine, op);
                 applyOperation(nrtEngine, op);
diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
index 174747d306ff5..af754d77560cc 100644
--- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
@@ -1070,6 +1070,22 @@ public List<Engine.Operation> generateHistoryOnReplica(
         boolean allowGapInSeqNo,
         boolean allowDuplicate,
         boolean includeNestedDocs
+    ) throws Exception {
+        return generateHistoryOnReplica(
+            numOps,
+            allowGapInSeqNo,
+            allowDuplicate,
+            includeNestedDocs,
+            randomFrom(Engine.Operation.TYPE.values())
+        );
+    }
+
+    public List<Engine.Operation> generateHistoryOnReplica(
+        int numOps,
+        boolean allowGapInSeqNo,
+        boolean allowDuplicate,
+        boolean includeNestedDocs,
+        Engine.Operation.TYPE opType
     ) throws Exception {
         long seqNo = 0;
         final int maxIdValue = randomInt(numOps * 2);
@@ -1077,7 +1093,6 @@ public List<Engine.Operation> generateHistoryOnReplica(
         CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory();
         for (int i = 0; i < numOps; i++) {
             final String id = Integer.toString(randomInt(maxIdValue));
-            final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values());
             final boolean isNestedDoc = includeNestedDocs && opType == Engine.Operation.TYPE.INDEX && randomBoolean();
             final int nestedValues = between(0, 3);
             final long startTime = threadPool.relativeTimeInNanos();
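Note on the fix in [PATCH 16/16]: the new `generateHistoryOnReplica` overload lets `testUpdateSegments` pin the generated history to `Engine.Operation.TYPE.INDEX` instead of generating a random mix of operation types and filtering afterwards, which could presumably leave the test with few or no INDEX operations on unlucky seeds. A minimal sketch of how a caller would use the overload (hypothetical test body; the `engine` and `nrtEngine` fixtures are assumed to be set up as in `testUpdateSegments` above):

    // Generate a history consisting solely of INDEX operations, so the
    // randomized test is deterministic with respect to operation type.
    List<Engine.Operation> operations = generateHistoryOnReplica(
        between(1, 500),             // numOps
        randomBoolean(),             // allowGapInSeqNo
        randomBoolean(),             // allowDuplicate
        randomBoolean(),             // includeNestedDocs
        Engine.Operation.TYPE.INDEX  // opType: fixed, no longer randomFrom(...)
    );
    // Apply the same operations to the primary engine and the NRT replica.
    for (Engine.Operation op : operations) {
        applyOperation(engine, op);
        applyOperation(nrtEngine, op);
    }

Callers that want the old behavior are unaffected: the original four-argument signature now simply delegates to the new overload with `randomFrom(Engine.Operation.TYPE.values())`.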