diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 331da2385e64..7eee00b78698 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,11 +38,11 @@ jobs: needs-kubernetes-tests: ${{ steps.selective-checks.outputs.needs-kubernetes-tests }} steps: - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: persist-credentials: false - name: Fetch incoming commit ${{ github.sha }} with its parent - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.sha }} fetch-depth: 2 @@ -81,9 +81,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for npm dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.pnpm-store @@ -92,7 +92,7 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.m2/repository @@ -101,7 +101,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} @@ -110,7 +110,7 @@ jobs: env: GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Store binaries for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-bin path: | @@ -118,13 +118,13 @@ jobs: !hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - name: Store source tarball for compilation - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-src path: hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - name: Store Maven repo for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-repo path: | @@ -144,7 +144,7 @@ jobs: fail-fast: false steps: - name: Download Ozone source tarball - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-src - name: Untar sources @@ -157,7 +157,7 @@ jobs: git config user.email 'noreply@github.com' git commit --allow-empty -a -m 'workaround for HADOOP-19011' - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -166,7 +166,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} @@ -187,15 +187,15 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 if: matrix.check != 'bats' - name: Checkout project with history - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 if: matrix.check == 'bats' - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -205,7 +205,7 @@ jobs: maven-repo- if: ${{ !contains('author,bats,docs', matrix.check) }} - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -218,7 +218,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: name: ${{ matrix.check }} @@ -237,9 +237,9 @@ jobs: fail-fast: false steps: - name: Checkout 
project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -248,7 +248,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -261,7 +261,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: name: ${{ matrix.check }} @@ -276,9 +276,9 @@ jobs: if: needs.build-info.outputs.needs-dependency-check == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -290,7 +290,7 @@ jobs: export OZONE_DIST_DIR=`pwd`/dist ./hadoop-ozone/dev-support/checks/dependency.sh - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: dependency @@ -305,9 +305,9 @@ jobs: if: needs.build-info.outputs.needs-dependency-check == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -317,7 +317,7 @@ jobs: maven-repo- - name: Download Ozone repo id: download-ozone-repo - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-repo path: | @@ -330,7 +330,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: ${{ github.job }} @@ -350,9 +350,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -376,7 +376,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: acceptance-${{ matrix.suite }} @@ -392,9 +392,9 @@ jobs: if: needs.build-info.outputs.needs-kubernetes-tests == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -412,7 +412,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: kubernetes @@ -441,9 +441,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -453,14 +453,14 @@ jobs: maven-repo- - name: Download Ozone 
repo id: download-ozone-repo - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-repo path: | ~/.m2/repository/org/apache/ozone continue-on-error: true - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -483,7 +483,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: it-${{ matrix.profile }} @@ -499,11 +499,11 @@ jobs: - integration steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -512,7 +512,7 @@ jobs: restore-keys: | maven-repo- - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: target/artifacts - name: Untar binaries @@ -522,7 +522,7 @@ jobs: - name: Calculate combined coverage run: ./hadoop-ozone/dev-support/checks/coverage.sh - name: Setup java 17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 17 @@ -533,7 +533,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: coverage path: target/coverage diff --git a/.github/workflows/close-pending.yaml b/.github/workflows/close-pending.yaml index 2a7cec992155..ce18152407d0 100644 --- a/.github/workflows/close-pending.yaml +++ b/.github/workflows/close-pending.yaml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Execute close-pending script if: github.repository == 'apache/ozone' run: ./.github/close-pending.sh diff --git a/.github/workflows/comments.yaml b/.github/workflows/comments.yaml index 4ffcc061d782..8f7d41c2bda4 100644 --- a/.github/workflows/comments.yaml +++ b/.github/workflows/comments.yaml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Execute process-comment script run: ./.github/process-comment.sh env: diff --git a/.github/workflows/dependabot-ci.yml b/.github/workflows/dependabot-ci.yml index dc02ff72cda0..aa216d1c58a6 100644 --- a/.github/workflows/dependabot-ci.yml +++ b/.github/workflows/dependabot-ci.yml @@ -49,7 +49,7 @@ jobs: #Delete the lockfile created by dependabot rm -rf hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml - name: Install NodeJS v${{ env.NODE_VERSION }} - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: ${{ env.NODE_VERSION }} - name: Install pnpm v${{ env.PNPM_VERSION }} and recreate lockfile @@ -75,4 +75,4 @@ jobs: git config --global user.name 'Github Actions' git config --global user.email 'noreply@github.com' git commit -m "[auto] Generated pnpm-lock from actions for $OZONE_SHA" || true - git push origin HEAD:${{ steps.get_branch_name.outputs.branch_name }} \ No newline at end of file + git push origin HEAD:${{ steps.get_branch_name.outputs.branch_name }} diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index 
c1cb2dd6a339..b31762c6c946 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -54,7 +54,7 @@ jobs: matrix: ${{steps.generate.outputs.matrix}} test_type: ${{steps.check-test-existence.outputs.test_type}} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Check for Test File @@ -101,11 +101,11 @@ jobs: split: ${{fromJson(needs.prepare-job.outputs.matrix)}} # Define splits fail-fast: ${{ fromJson(github.event.inputs.fail-fast) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.m2/repository key: maven-repo-${{ hashFiles('**/pom.xml') }}-8-single @@ -114,7 +114,7 @@ jobs: maven-repo-${{ hashFiles('**/pom.xml') }} maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -140,7 +140,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ needs.prepare-job.outputs.test_type }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: result-${{ env.TEST_CLASS }}-split-${{ matrix.split }} @@ -157,7 +157,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Download build results - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Count failures run: | failures=$(find . -name 'summary.txt' | grep -v 'iteration' | xargs grep -v 'exit code: 0' | wc -l) diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index f37e680a5f51..53ba44e0f2fb 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check pull request title env: TITLE: ${{ github.event.pull_request.title }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index e19cc2cd267b..7269a9c417a6 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -55,7 +55,7 @@ jobs: outputs: matrix: ${{steps.generate.outputs.matrix}} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Verify Test Filter @@ -80,9 +80,9 @@ jobs: timeout-minutes: 60 steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for npm dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.pnpm-store @@ -91,7 +91,7 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.m2/repository key: maven-repo-${{ hashFiles('**/pom.xml') }}-${{ env.JAVA_VERSION }} @@ -99,7 +99,7 @@ jobs: maven-repo-${{ hashFiles('**/pom.xml') }} maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ env.JAVA_VERSION }} @@ -108,7 +108,7 @@ jobs: env: GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Store binaries for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-bin path: | @@ -132,11 +132,11 @@ jobs: split: ${{ 
fromJson(needs.prepare-job.outputs.matrix) }} fail-fast: ${{ fromJson(github.event.inputs.fail-fast) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -159,7 +159,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: acceptance-${{ matrix.split }} diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 6c1bac1b021a..17c9d541a5a5 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -112,7 +112,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on @@ -158,7 +158,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on ${bouncycastle.version} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 24e76821f9c5..234439a00c24 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -112,6 +112,7 @@ public class ECReconstructionCoordinator implements Closeable { private final ContainerClientMetrics clientMetrics; private final ECReconstructionMetrics metrics; private final StateContext context; + private final OzoneClientConfig ozoneClientConfig; public ECReconstructionCoordinator( ConfigurationSource conf, CertificateClient certificateClient, @@ -125,10 +126,10 @@ public ECReconstructionCoordinator( ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d") .build(); + ozoneClientConfig = conf.getObject(OzoneClientConfig.class); this.ecReconstructExecutor = new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - conf.getObject(OzoneClientConfig.class) - .getEcReconstructStripeReadPoolLimit(), + ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), 60, TimeUnit.SECONDS, new SynchronousQueue<>(), @@ -222,16 +223,15 @@ public void reconstructECContainerGroup(long containerID, private ECBlockOutputStream getECBlockOutputStream( BlockLocationInfo blockLocationInfo, DatanodeDetails datanodeDetails, - ECReplicationConfig repConfig, int replicaIndex, - OzoneClientConfig configuration) throws IOException { + ECReplicationConfig repConfig, int replicaIndex) throws IOException { StreamBufferArgs streamBufferArgs = - StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, configuration); + StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, ozoneClientConfig); return new ECBlockOutputStream( blockLocationInfo.getBlockID(), containerOperationClient.getXceiverClientManager(), containerOperationClient.singleNodePipeline(datanodeDetails, repConfig, replicaIndex), - BufferPool.empty(), configuration, + BufferPool.empty(), ozoneClientConfig, blockLocationInfo.getToken(), clientMetrics, streamBufferArgs); } @@ -277,15 +277,14 @@ public void 
reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; ByteBuffer[] bufs = new ByteBuffer[toReconstructIndexes.size()]; - OzoneClientConfig configuration = new OzoneClientConfig(); try { for (int i = 0; i < toReconstructIndexes.size(); i++) { int replicaIndex = toReconstructIndexes.get(i); DatanodeDetails datanodeDetails = targetMap.get(replicaIndex); targetBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, - datanodeDetails, repConfig, replicaIndex, - configuration); + datanodeDetails, repConfig, replicaIndex + ); bufs[i] = byteBufferPool.getBuffer(false, repConfig.getEcChunkSize()); // Make sure it's clean. Don't want to reuse the erroneously returned // buffers from the pool. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java index cdb3230a9b25..8f43b5c235f2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java @@ -20,8 +20,6 @@ import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -42,6 +40,7 @@ import org.apache.commons.compress.archivers.ArchiveEntry; import org.apache.commons.compress.archivers.ArchiveInputStream; import org.apache.commons.compress.archivers.ArchiveOutputStream; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.io.FileUtils; @@ -124,7 +123,7 @@ private void extractEntry(ArchiveEntry entry, InputStream input, long size, Files.createDirectories(parent); } - try (OutputStream fileOutput = new FileOutputStream(path.toFile()); + try (OutputStream fileOutput = Files.newOutputStream(path); OutputStream output = new BufferedOutputStream(fileOutput)) { int bufferSize = 1024; byte[] buffer = new byte[bufferSize + 1]; @@ -157,7 +156,7 @@ public void pack(Container container, KeyValueContainerData containerData = container.getContainerData(); - try (ArchiveOutputStream archiveOutput = tar(compress(output))) { + try (ArchiveOutputStream<TarArchiveEntry> archiveOutput = tar(compress(output))) { includeFile(container.getContainerFile(), CONTAINER_FILE_NAME, archiveOutput); @@ -172,7 +171,7 @@ public void pack(Container container, @Override public byte[] unpackContainerDescriptor(InputStream input) throws IOException { - try (ArchiveInputStream archiveInput = untar(decompress(input))) { + try (ArchiveInputStream<TarArchiveEntry> archiveInput = untar(decompress(input))) { ArchiveEntry entry = archiveInput.getNextEntry(); while (entry != null) { @@ -219,9 +218,7 @@ public static Path getDbPath(Path baseDir, public static Path getChunkPath(Path baseDir, KeyValueContainerData containerData) { - Path chunkDir = KeyValueContainerLocationUtil.getChunksLocationPath( - baseDir.toString()).toPath(); - return chunkDir; + return KeyValueContainerLocationUtil.getChunksLocationPath(baseDir.toString()).toPath(); } private byte[] readEntry(InputStream input, final long size) @@
-240,11 +237,11 @@ private byte[] readEntry(InputStream input, final long size) } private void includePath(Path dir, String subdir, - ArchiveOutputStream archiveOutput) throws IOException { + ArchiveOutputStream<TarArchiveEntry> archiveOutput) throws IOException { // Add a directory entry before adding files, in case the directory is // empty. - ArchiveEntry entry = archiveOutput.createArchiveEntry(dir.toFile(), subdir); + TarArchiveEntry entry = archiveOutput.createArchiveEntry(dir.toFile(), subdir); archiveOutput.putArchiveEntry(entry); archiveOutput.closeArchiveEntry(); @@ -258,20 +255,20 @@ private void includePath(Path dir, String subdir, } static void includeFile(File file, String entryName, - ArchiveOutputStream archiveOutput) throws IOException { - ArchiveEntry entry = archiveOutput.createArchiveEntry(file, entryName); + ArchiveOutputStream<TarArchiveEntry> archiveOutput) throws IOException { + TarArchiveEntry entry = archiveOutput.createArchiveEntry(file, entryName); archiveOutput.putArchiveEntry(entry); - try (InputStream input = new FileInputStream(file)) { + try (InputStream input = Files.newInputStream(file.toPath())) { IOUtils.copy(input, archiveOutput); } archiveOutput.closeArchiveEntry(); } - private static ArchiveInputStream untar(InputStream input) { + private static ArchiveInputStream<TarArchiveEntry> untar(InputStream input) { return new TarArchiveInputStream(input); } - private static ArchiveOutputStream tar(OutputStream output) { + private static ArchiveOutputStream<TarArchiveEntry> tar(OutputStream output) { TarArchiveOutputStream os = new TarArchiveOutputStream(output); os.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX); return os; @@ -290,7 +287,7 @@ OutputStream compress(OutputStream output) throws IOException { private byte[] innerUnpack(InputStream input, Path dbRoot, Path chunksRoot) throws IOException { byte[] descriptorFileContent = null; - try (ArchiveInputStream archiveInput = untar(decompress(input))) { + try (ArchiveInputStream<TarArchiveEntry> archiveInput = untar(decompress(input))) { ArchiveEntry entry = archiveInput.getNextEntry(); while (entry != null) { String name = entry.getName(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index 6b81acee77aa..d88a649652a3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -18,13 +18,15 @@ package org.apache.hadoop.ozone.container.replication; import java.io.File; -import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.file.Files; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Semaphore; -import org.apache.commons.compress.archivers.ArchiveEntry; + +import org.apache.commons.compress.archivers.ArchiveOutputStream; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -145,11 +147,11 @@ private File containerTarFile( File tarFile = tempFolder.newFile( ContainerUtils.getContainerTarName(containerId)); try (FileOutputStream output = new FileOutputStream(tarFile)) { - TarArchiveOutputStream archive =
new TarArchiveOutputStream(output); - ArchiveEntry entry = archive.createArchiveEntry(yamlFile, + ArchiveOutputStream<TarArchiveEntry> archive = new TarArchiveOutputStream(output); + TarArchiveEntry entry = archive.createArchiveEntry(yamlFile, "container.yaml"); archive.putArchiveEntry(entry); - try (InputStream input = new FileInputStream(yamlFile)) { + try (InputStream input = Files.newInputStream(yamlFile.toPath())) { IOUtils.copy(input, archive); } archive.closeArchiveEntry(); diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index caf0ffdac510..1041f9e4c58b 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -118,7 +118,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <groupId>org.bouncycastle</groupId> - <artifactId>bcprov-jdk15on</artifactId> + <artifactId>bcprov-jdk18on</artifactId> diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java index 8e8abdcc7a81..539bf8a29c42 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java @@ -28,6 +28,7 @@ import java.util.Objects; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.TableCacheMetrics; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -165,10 +166,17 @@ public void put(KEY key, VALUE value) throws IOException { public void putWithBatch(BatchOperation batch, KEY key, VALUE value) throws IOException { if (supportCodecBuffer) { - // The buffers will be released after commit. - rawTable.putWithBatch(batch, - keyCodec.toDirectCodecBuffer(key), - valueCodec.toDirectCodecBuffer(value)); + CodecBuffer keyBuffer = null; + CodecBuffer valueBuffer = null; + try { + keyBuffer = keyCodec.toDirectCodecBuffer(key); + valueBuffer = valueCodec.toDirectCodecBuffer(value); + // The buffers will be released after commit.
+ rawTable.putWithBatch(batch, keyBuffer, valueBuffer); + } catch (Exception e) { + IOUtils.closeQuietly(valueBuffer, keyBuffer); + throw e; + } } else { rawTable.putWithBatch(batch, encodeKey(key), encodeValue(value)); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java index 2c71e2b36823..3036855ae0cd 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java @@ -288,7 +288,7 @@ private void verifyServiceId(Extensions extensions) { Assertions.assertEquals("2.16.840.1.113730.3.1.34", oid); } if (o instanceof DERTaggedObject) { - String serviceName = ((DERTaggedObject)o).getObject().toString(); + String serviceName = ((DERTaggedObject)o).toASN1Primitive().toString(); Assertions.assertEquals("OzoneMarketingCluster003", serviceName); } } diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 027b9a68b1b6..f6ebcb0a0201 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -131,10 +131,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> - <exclusion> - <groupId>org.apache.commons</groupId> - <artifactId>commons-text</artifactId> - </exclusion> <groupId>org.slf4j</groupId> <artifactId>*</artifactId> diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 16b682b6d435..549d2c8f8799 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -208,16 +208,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <scope>test</scope> - <dependency> - <groupId>org.bouncycastle</groupId> - <artifactId>bcprov-jdk15on</artifactId> - <version>${bouncycastle.version}</version> - </dependency> - <dependency> - <groupId>org.bouncycastle</groupId> - <artifactId>bcpkix-jdk15on</artifactId> - <version>${bouncycastle.version}</version> - </dependency> <groupId>org.apache.ozone</groupId> <artifactId>hdds-rocks-native</artifactId> diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index e830106e5700..5e612d8b204d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -1421,16 +1421,21 @@ public String getCompactionLogDir() { * those are not needed to generate snapshot diff. These files are basically * non-leaf nodes of the DAG. */ - public synchronized void pruneSstFiles() { + public void pruneSstFiles() { if (!shouldRun()) { return; } Set<String> nonLeafSstFiles; - nonLeafSstFiles = forwardCompactionDAG.nodes().stream() - .filter(node -> !forwardCompactionDAG.successors(node).isEmpty()) - .map(node -> node.getFileName()) - .collect(Collectors.toSet()); + // This is synchronized because the compaction thread can update the compactionDAG and may be at a point + // where nodes have been added to the graph but their arcs are still in progress. + // Hence, the lock is taken.
+ synchronized (this) { + nonLeafSstFiles = forwardCompactionDAG.nodes().stream() + .filter(node -> !forwardCompactionDAG.successors(node).isEmpty()) + .map(node -> node.getFileName()) + .collect(Collectors.toSet()); + } if (CollectionUtils.isNotEmpty(nonLeafSstFiles)) { LOG.info("Removing SST files: {} as part of SST file pruning.", @@ -1448,8 +1453,13 @@ public void incrementTarballRequestCount() { tarballRequestCount.incrementAndGet(); } - public void decrementTarballRequestCount() { - tarballRequestCount.decrementAndGet(); + public void decrementTarballRequestCountAndNotify() { + // Synchronized block ensures the lock is held on the same instance on which notifyAll is called. + synchronized (this) { + tarballRequestCount.decrementAndGet(); + // Notify compaction threads to continue. + notifyAll(); + } } public boolean shouldRun() { @@ -1517,8 +1527,7 @@ public static RocksDBCheckpointDiffer getInstance( * for cache. */ public static void invalidateCacheEntry(String cacheKey) { - IOUtils.closeQuietly(INSTANCE_MAP.get(cacheKey)); - INSTANCE_MAP.remove(cacheKey); + IOUtils.close(LOG, INSTANCE_MAP.remove(cacheKey)); } } diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 3a8bd8aa82c9..fc0ec55b46b9 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -125,7 +125,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <groupId>org.bouncycastle</groupId> - <artifactId>bcprov-jdk15on</artifactId> + <artifactId>bcprov-jdk18on</artifactId> <groupId>org.apache.ozone</groupId> diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java index 4f1f66faccba..122b04b715d3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java @@ -44,13 +44,7 @@ public void put(String id, S3SecretValue secretValue) { @Override public void invalidate(String id) { - S3SecretValue secret = cache.getIfPresent(id); - if (secret == null) { - return; - } - secret.setDeleted(true); - secret.setAwsSecret(null); - cache.put(id, secret); + cache.asMap().computeIfPresent(id, (k, secret) -> secret.deleted()); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index e97adc0a50f4..cb1ed0976a08 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -27,7 +27,7 @@ /** * S3Secret to be saved in database. */ -public class S3SecretValue { +public final class S3SecretValue { private static final Codec<S3SecretValue> CODEC = new DelegatedCodec<>( Proto2Codec.get(S3Secret.getDefaultInstance()), S3SecretValue::fromProtobuf, @@ -38,16 +38,29 @@ public static Codec<S3SecretValue> getCodec() { } // TODO: This field should be renamed to accessId for generalization.
- private String kerberosID; - private String awsSecret; - private boolean isDeleted; - private long transactionLogIndex; + private final String kerberosID; + private final String awsSecret; + private final boolean isDeleted; + private final long transactionLogIndex; - public S3SecretValue(String kerberosID, String awsSecret) { - this(kerberosID, awsSecret, false, 0L); + public static S3SecretValue of(String kerberosID, String awsSecret) { + return of(kerberosID, awsSecret, 0); } - public S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted, + public static S3SecretValue of(String kerberosID, String awsSecret, long transactionLogIndex) { + return new S3SecretValue( + Objects.requireNonNull(kerberosID), + Objects.requireNonNull(awsSecret), + false, + transactionLogIndex + ); + } + + public S3SecretValue deleted() { + return new S3SecretValue(kerberosID, "", true, transactionLogIndex); + } + + private S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted, long transactionLogIndex) { this.kerberosID = kerberosID; this.awsSecret = awsSecret; @@ -59,26 +72,14 @@ public String getKerberosID() { return kerberosID; } - public void setKerberosID(String kerberosID) { - this.kerberosID = kerberosID; - } - public String getAwsSecret() { return awsSecret; } - public void setAwsSecret(String awsSecret) { - this.awsSecret = awsSecret; - } - public boolean isDeleted() { return isDeleted; } - public void setDeleted(boolean status) { - this.isDeleted = status; - } - public String getAwsAccessKey() { return kerberosID; } @@ -87,12 +88,8 @@ public long getTransactionLogIndex() { return transactionLogIndex; } - public void setTransactionLogIndex(long transactionLogIndex) { - this.transactionLogIndex = transactionLogIndex; - } - public static S3SecretValue fromProtobuf(S3Secret s3Secret) { - return new S3SecretValue(s3Secret.getKerberosID(), s3Secret.getAwsSecret()); + return S3SecretValue.of(s3Secret.getKerberosID(), s3Secret.getAwsSecret()); } public S3Secret getProtobuf() { diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index a361067ae7f1..952e0f348133 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -446,7 +446,6 @@ Apache License 2.0 org.xerial.snappy:snappy-java org.xerial:sqlite-jdbc org.yaml:snakeyaml - software.amazon.ion:ion-java org.awaitility:awaitility MIT @@ -454,8 +453,9 @@ MIT com.bettercloud:vault-java-driver com.kstruct:gethostname4j - org.bouncycastle:bcpkix-jdk15on - org.bouncycastle:bcprov-jdk15on + org.bouncycastle:bcpkix-jdk18on + org.bouncycastle:bcprov-jdk18on + org.bouncycastle:bcutil-jdk18on org.checkerframework:checker-qual org.codehaus.mojo:animal-sniffer-annotations org.kohsuke.metainf-services:metainf-services diff --git a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt index dafb8905d0f7..44492fd26f09 100644 --- a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt @@ -482,10 +482,10 @@ For additional credits (generally to people who reported problems) see CREDITS file. -org.bouncycastle:bcprov-jdk15on +org.bouncycastle:bcpkix-jdk18on ==================== -Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. 
(https://www.bouncycastle.org) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index e9a781862d3c..7d224af80898 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -11,8 +11,9 @@ share/ozone/lib/awaitility.jar share/ozone/lib/aws-java-sdk-core.jar share/ozone/lib/aws-java-sdk-kms.jar share/ozone/lib/aws-java-sdk-s3.jar -share/ozone/lib/bcpkix-jdk15on.jar -share/ozone/lib/bcprov-jdk15on.jar +share/ozone/lib/bcpkix-jdk18on.jar +share/ozone/lib/bcprov-jdk18on.jar +share/ozone/lib/bcutil-jdk18on.jar share/ozone/lib/bonecp.RELEASE.jar share/ozone/lib/cdi-api.jar share/ozone/lib/checker-qual.jar @@ -90,7 +91,6 @@ share/ozone/lib/httpclient.jar share/ozone/lib/httpcore.jar share/ozone/lib/httpcore-nio.jar share/ozone/lib/httpmime.jar -share/ozone/lib/ion-java.jar share/ozone/lib/istack-commons-runtime.jar share/ozone/lib/j2objc-annotations.jar share/ozone/lib/jackson-annotations.jar diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot index 7d9edcdef448..55ed9ddf5044 100644 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot @@ -32,10 +32,12 @@ Get test user principal [return] ${user}/${instance}@EXAMPLE.COM Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster ${principal} = Get test user principal HTTP Wait Until Keyword Succeeds 2min 10sec Execute kinit -k -t /etc/security/keytabs/HTTP.keytab ${principal} Kinit test user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster [arguments] ${user} ${keytab} ${TEST_USER} = Get test user principal ${user} Set Suite Variable ${TEST_USER} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index c0b2c9f7bfae..840fb963d8d1 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -207,3 +207,9 @@ Verify Multipart Upload ${tmp} = Catenate @{files} Execute cat ${tmp} > /tmp/original${random} Compare files /tmp/original${random} /tmp/verify${random} + +Revoke S3 secrets + Execute and Ignore Error ozone s3 revokesecret -y + Execute and Ignore Error ozone s3 revokesecret -y -u testuser + Execute and Ignore Error ozone s3 revokesecret -y -u testuser2 + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index b9f6993f45e2..70dcfa1abede 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -21,30 +21,37 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... 
AND Revoke S3 secrets +Test Teardown Run Keyword Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 +${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Generate Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Secret Already Exists + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret + Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Generate Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 27b4580f419b..0f15f23067b0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -21,8 +21,9 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... AND Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 @@ -31,19 +32,19 @@ ${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Revoke Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. 
- END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Revoke Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END \ No newline at end of file + Should contain ${result} HTTP/1.1 200 OK ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index e8aece20f4fd..7c3674574935 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -263,7 +263,7 @@ public void testS3Auth() throws Exception { // Add secret to S3Secret table. s3SecretManager.storeSecret(accessKey, - new S3SecretValue(accessKey, secret)); + S3SecretValue.of(accessKey, secret)); OMRequest writeRequest = OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) @@ -312,7 +312,7 @@ public void testS3Auth() throws Exception { // Override secret to S3Secret store with some dummy value s3SecretManager - .storeSecret(accessKey, new S3SecretValue(accessKey, "dummy")); + .storeSecret(accessKey, S3SecretValue.of(accessKey, "dummy")); // Write request with invalid credentials. 
omResponse = cluster.getOzoneManager().getOmServerProtocol() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 7830d6996531..6cef867e896b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -216,13 +216,16 @@ public static void shutdown() { */ public static String createKey(OzoneBucket ozoneBucket) throws IOException { String keyName = "key" + RandomStringUtils.randomNumeric(5); + createKey(ozoneBucket, keyName); + return keyName; + } + + public static void createKey(OzoneBucket ozoneBucket, String keyName) throws IOException { String data = "data" + RandomStringUtils.randomNumeric(5); - OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - data.length(), ReplicationType.RATIS, + OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); - return keyName; } protected OzoneBucket setupBucket() throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java index cc78452c7c49..682d02cd2861 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; @@ -40,6 +39,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -48,7 +48,6 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; @@ -67,6 +66,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -86,10 +86,8 @@ * Tests snapshot background services. 
*/ @Timeout(5000) -@Flaky("HDDS-9455") public class TestSnapshotBackgroundServices { - - private MiniOzoneHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster; private ObjectStore objectStore; private OzoneBucket ozoneBucket; private String volumeName; @@ -97,12 +95,12 @@ public class TestSnapshotBackgroundServices { private static final long SNAPSHOT_THRESHOLD = 50; private static final int LOG_PURGE_GAP = 50; - // This test depends on direct RocksDB checks that are easier done with OBS - // buckets. - private static final BucketLayout TEST_BUCKET_LAYOUT = - BucketLayout.OBJECT_STORE; - private static final String SNAPSHOT_NAME_PREFIX = "snapshot"; + // This test depends on direct RocksDB checks that are easier done with OBS buckets. + private static final BucketLayout TEST_BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + private static final String SNAPSHOT_NAME_PREFIX = "snapshot-"; + private static final String KEY_NAME_PREFIX = "key-"; private OzoneClient client; + private final AtomicInteger counter = new AtomicInteger(); /** * Create a MiniOzoneCluster for testing. The cluster initially has one @@ -115,11 +113,12 @@ public void init(TestInfo testInfo) throws Exception { String clusterId = UUID.randomUUID().toString(); String scmId = UUID.randomUUID().toString(); String omServiceId = "om-service-test1"; + OzoneManagerRatisServerConfig omRatisConf = conf.getObject(OzoneManagerRatisServerConfig.class); + omRatisConf.setLogAppenderWaitTimeMin(10); + conf.setFromObject(omRatisConf); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP); - conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16, - StorageUnit.KB); - conf.setStorageSize(OMConfigKeys. - OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, 16, StorageUnit.KB); + conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16, StorageUnit.KB); + conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, 16, StorageUnit.KB); if ("testSSTFilteringBackgroundService".equals(testInfo.getDisplayName())) { conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); @@ -174,12 +173,12 @@ public void init(TestInfo testInfo) throws Exception { client = OzoneClientFactory.getRpcClient(omServiceId, conf); objectStore = client.getObjectStore(); - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + volumeName = "volume" + counter.incrementAndGet(); + bucketName = "bucket" + counter.incrementAndGet(); VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner("user" + RandomStringUtils.randomNumeric(5)) - .setAdmin("admin" + RandomStringUtils.randomNumeric(5)) + .setOwner("user" + counter.incrementAndGet()) + .setAdmin("admin" + counter.incrementAndGet()) .build(); objectStore.createVolume(volumeName, createVolumeArgs); @@ -224,8 +223,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() cluster.getOzoneManager(leaderOM.getOMNodeId()); Assertions.assertEquals(leaderOM, newFollowerOM); - SnapshotInfo newSnapshot = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo newSnapshot = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); /* Check whether newly created key data is reclaimed @@ -250,8 +248,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() Assertions.assertNotNull(keyInfoA); // create snapshot b - SnapshotInfo snapshotInfoB = 
createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoB = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); Assertions.assertNotNull(snapshotInfoB); // delete key a @@ -261,8 +258,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() () -> !isKeyInTable(keyA, omKeyInfoTable)); // create snapshot c - SnapshotInfo snapshotInfoC = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoC = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); // get snapshot c OmSnapshot snapC; @@ -279,8 +275,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() () -> isKeyInTable(keyA, snapC.getMetadataManager().getDeletedTable())); // create snapshot d - SnapshotInfo snapshotInfoD = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoD = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); // delete snapshot c client.getObjectStore() @@ -533,18 +528,14 @@ public void testSSTFilteringBackgroundService() private void confirmSnapDiffForTwoSnapshotsDifferingBySingleKey( OzoneManager ozoneManager) throws IOException, InterruptedException, TimeoutException { - String firstSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(10)).getName(); + String firstSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()) + .getName(); String diffKey = writeKeys(1).get(0); - String secondSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(10)).getName(); - SnapshotDiffReportOzone diff = getSnapDiffReport(volumeName, bucketName, - firstSnapshot, secondSnapshot); + String secondSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()) + .getName(); + SnapshotDiffReportOzone diff = getSnapDiffReport(volumeName, bucketName, firstSnapshot, secondSnapshot); Assertions.assertEquals(Collections.singletonList( - SnapshotDiffReportOzone.getDiffReportEntry( - SnapshotDiffReport.DiffType.CREATE, diffKey, null)), + SnapshotDiffReportOzone.getDiffReportEntry(SnapshotDiffReport.DiffType.CREATE, diffKey, null)), diff.getDiffList()); } @@ -574,9 +565,7 @@ private static File getSstBackupDir(OzoneManager ozoneManager) { private void checkIfSnapshotGetsProcessedBySFS(OzoneManager ozoneManager) throws IOException, TimeoutException, InterruptedException { writeKeys(1); - SnapshotInfo newSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(5)); + SnapshotInfo newSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); Assertions.assertNotNull(newSnapshot); Table snapshotInfoTable = ozoneManager.getMetadataManager().getSnapshotInfoTable(); @@ -640,22 +629,17 @@ private SnapshotDiffReportOzone getSnapDiffReport(String volume, return response.get().getSnapshotDiffReport(); } - private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) - throws IOException { + private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) throws IOException { objectStore.createSnapshot(volumeName, bucketName, name); - String tableKey = 
SnapshotInfo.getTableKey(volumeName, - bucketName, - name); + String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, name); SnapshotInfo snapshotInfo = leaderOM.getMetadataManager() .getSnapshotInfoTable() .get(tableKey); // Allow the snapshot to be written to disk - String fileName = - getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); File snapshotDir = new File(fileName); - if (!RDBCheckpointUtils - .waitForCheckpointDirectoryExist(snapshotDir)) { + if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("snapshot directory doesn't exist"); } return snapshotInfo; @@ -665,7 +649,9 @@ private List writeKeys(long keyCount) throws IOException { List keys = new ArrayList<>(); long index = 0; while (index < keyCount) { - keys.add(createKey(ozoneBucket)); + String key = KEY_NAME_PREFIX + counter.incrementAndGet(); + createKey(ozoneBucket, key); + keys.add(key); index++; } return keys; @@ -679,5 +665,4 @@ private void readKeys(List keys) throws IOException { inputStream.close(); } } - } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 98c23b8076f8..843a955ecde0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -194,7 +194,6 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { // /vol1/bucket2/bucket2snap1 has been cleaned up from cache map SnapshotCache snapshotCache = om.getOmSnapshotManager().getSnapshotCache(); assertEquals(2, snapshotCache.size()); - assertEquals(2, snapshotCache.getPendingEvictionListSize()); } @SuppressWarnings("checkstyle:MethodLength") diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java index 19cb68619715..98f06a245031 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java @@ -40,7 +40,7 @@ public void testCodecWithCorrectData() throws Exception { final Codec codec = getCodec(); S3SecretValue s3SecretValue = - new S3SecretValue(UUID.randomUUID().toString(), + S3SecretValue.of(UUID.randomUUID().toString(), UUID.randomUUID().toString()); byte[] data = codec.toPersistedFormat(s3SecretValue); diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index ef9ca990c108..9fb1b1173077 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -93,7 +93,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on io.netty diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 3e6d70626728..65584d705852 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
index 3e6d70626728..65584d705852 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
@@ -242,9 +242,9 @@ public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush)
     long startTime = System.currentTimeMillis();
     long pauseCounter = PAUSE_COUNTER.incrementAndGet();
-    // Pause compactions, Copy/link files and get checkpoint.
     try {
       LOG.info("Compaction pausing {} started.", pauseCounter);
+      // Pause compactions, Copy/link files and get checkpoint.
       differ.incrementTarballRequestCount();
       FileUtils.copyDirectory(compactionLogDir.getOriginalDir(),
           compactionLogDir.getTmpDir());
@@ -253,13 +253,9 @@ public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush)
       checkpoint = getDbStore().getCheckpoint(flush);
     } finally {
       // Unpause the compaction threads.
-      synchronized (getDbStore().getRocksDBCheckpointDiffer()) {
-        differ.decrementTarballRequestCount();
-        differ.notifyAll();
-        long elapsedTime = System.currentTimeMillis() - startTime;
-        LOG.info("Compaction pausing {} ended. Elapsed ms: {}",
-            pauseCounter, elapsedTime);
-      }
+      differ.decrementTarballRequestCountAndNotify();
+      long elapsedTime = System.currentTimeMillis() - startTime;
+      LOG.info("Compaction pausing {} ended. Elapsed ms: {}", pauseCounter, elapsedTime);
    }
     return checkpoint;
   }
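The servlet previously synchronized on the differ and called notifyAll() inline; the patch folds that sequence into a single decrementTarballRequestCountAndNotify() call. A sketch of what such an encapsulation plausibly looks like (an illustrative class, not the real RocksDBCheckpointDiffer):

```java
// Hedged sketch: one way to encapsulate the decrement-and-notify sequence the
// servlet used to inline. Method and field names are assumptions.
public class CompactionPauser {
  private int tarballRequestCount = 0;

  // Callers that need compactions paused increment before copying files.
  public synchronized void incrementTarballRequestCount() {
    tarballRequestCount++;
  }

  // Single entry point: decrement the count and wake waiting compaction
  // threads while still holding the monitor, so callers cannot forget to
  // synchronize or to call notifyAll().
  public synchronized void decrementTarballRequestCountAndNotify() {
    tarballRequestCount--;
    notifyAll();
  }

  // Compaction threads block here until no tarball request is in flight.
  public synchronized void awaitUnpaused() throws InterruptedException {
    while (tarballRequestCount > 0) {
      wait();
    }
  }
}
```

Keeping the decrement and the notify inside one synchronized method removes the risk of a caller decrementing the count without waking the paused threads.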
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index e272cb8692c0..56ad604922ad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -317,6 +317,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
   private final Map<String, TableCacheMetrics> tableCacheMetricsMap = new HashMap<>();
   private SnapshotChainManager snapshotChainManager;
+  private final S3Batcher s3Batcher = new S3SecretBatcher();
 
   /**
    * OmMetadataManagerImpl constructor.
@@ -1940,25 +1941,7 @@ public void revokeSecret(String kerberosId) throws IOException {
 
   @Override
   public S3Batcher batcher() {
-    return new S3Batcher() {
-      @Override
-      public void addWithBatch(AutoCloseable batchOperator,
-          String id, S3SecretValue s3SecretValue)
-          throws IOException {
-        if (batchOperator instanceof BatchOperation) {
-          s3SecretTable.putWithBatch((BatchOperation) batchOperator,
-              id, s3SecretValue);
-        }
-      }
-
-      @Override
-      public void deleteWithBatch(AutoCloseable batchOperator, String id)
-          throws IOException {
-        if (batchOperator instanceof BatchOperation) {
-          s3SecretTable.deleteWithBatch((BatchOperation) batchOperator, id);
-        }
-      }
-    };
+    return s3Batcher;
   }
 
   @Override
@@ -2170,4 +2153,23 @@ public boolean containsIncompleteMPUs(String volume, String bucket)
 
     return false;
   }
+
+  private final class S3SecretBatcher implements S3Batcher {
+    @Override
+    public void addWithBatch(AutoCloseable batchOperator, String id, S3SecretValue s3SecretValue)
+        throws IOException {
+      if (batchOperator instanceof BatchOperation) {
+        s3SecretTable.putWithBatch((BatchOperation) batchOperator,
+            id, s3SecretValue);
+      }
+    }
+
+    @Override
+    public void deleteWithBatch(AutoCloseable batchOperator, String id)
+        throws IOException {
+      if (batchOperator instanceof BatchOperation) {
+        s3SecretTable.deleteWithBatch((BatchOperation) batchOperator, id);
+      }
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 4e46adc66b1e..195ff816bc50 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -62,8 +62,7 @@ public S3SecretValue getSecret(String kerberosID) throws IOException {
         // purposely deleted the secret. Hence, we do not have to check the DB.
         return null;
       }
-      return new S3SecretValue(cacheValue.getKerberosID(),
-          cacheValue.getAwsSecret());
+      return cacheValue;
     }
     S3SecretValue result = s3SecretStore.getSecret(kerberosID);
     if (result != null) {
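batcher() used to allocate a fresh anonymous S3Batcher on every call; since the batcher keeps no state of its own, the patch creates one named inner-class instance up front and hands it out repeatedly. The shape of that refactor, with simplified names standing in for the Ozone types:

```java
// Illustrative sketch of the refactor's shape (names simplified, not the
// Ozone classes): a stateless strategy object can be created once and
// returned on every call instead of allocating a new anonymous instance.
import java.io.IOException;

interface Batcher {
  void addWithBatch(AutoCloseable batch, String id, String value) throws IOException;
}

class MetadataManager {
  private final Batcher batcher = new NamedBatcher(); // created once

  Batcher batcher() {
    return batcher; // no per-call allocation
  }

  private final class NamedBatcher implements Batcher {
    @Override
    public void addWithBatch(AutoCloseable batch, String id, String value) {
      // a real implementation would delegate to the enclosing manager's table
    }
  }
}
```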
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
index 0ad6beb7ef63..a7981247d468 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java
@@ -113,26 +113,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     try {
       omClientResponse = ozoneManager.getS3SecretManager()
           .doUnderLock(accessId, s3SecretManager -> {
-            // Intentionally set to final so they can only be set once.
-            final S3SecretValue newS3SecretValue;
             // Update legacy S3SecretTable, if the accessId entry exists
-            if (s3SecretManager.hasS3Secret(accessId)) {
-              // accessId found in S3SecretTable. Update S3SecretTable
-              LOG.debug("Updating S3SecretTable cache entry");
-              // Update S3SecretTable cache entry in this case
-              newS3SecretValue = new S3SecretValue(accessId, secretKey);
-              // Set the transactionLogIndex to be used for updating.
-              newS3SecretValue.setTransactionLogIndex(transactionLogIndex);
-              s3SecretManager
-                  .updateCache(accessId, newS3SecretValue);
-            } else {
+            if (!s3SecretManager.hasS3Secret(accessId)) {
               // If S3SecretTable is not updated,
               // throw ACCESS_ID_NOT_FOUND exception.
               throw new OMException("accessId '" + accessId + "' not found.",
                   OMException.ResultCodes.ACCESS_ID_NOT_FOUND);
             }
 
+            // Update S3SecretTable cache entry in this case
+            // Set the transactionLogIndex to be used for updating.
+            final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, transactionLogIndex);
+            s3SecretManager.updateCache(accessId, newS3SecretValue);
+
             // Compose response
             final SetS3SecretResponse.Builder setSecretResponse =
                 SetS3SecretResponse.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
index 24a79019228d..846acb4fdae3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
@@ -150,21 +150,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     try {
       omClientResponse = ozoneManager.getS3SecretManager()
           .doUnderLock(accessId, s3SecretManager -> {
-            S3SecretValue assignS3SecretValue;
-            S3SecretValue s3SecretValue =
-                s3SecretManager.getSecret(accessId);
+            final S3SecretValue assignS3SecretValue;
+            S3SecretValue s3SecretValue = s3SecretManager.getSecret(accessId);
 
             if (s3SecretValue == null) {
               // Not found in S3SecretTable.
               if (createIfNotExist) {
                 // Add new entry in this case
-                assignS3SecretValue =
-                    new S3SecretValue(accessId, awsSecret.get());
-                // Set the transactionLogIndex to be used for updating.
-                assignS3SecretValue.setTransactionLogIndex(transactionLogIndex);
+                assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), transactionLogIndex);
                 // Add cache entry first.
-                s3SecretManager.updateCache(accessId,
-                    assignS3SecretValue);
+                s3SecretManager.updateCache(accessId, assignS3SecretValue);
               } else {
                 assignS3SecretValue = null;
               }
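The same construction change recurs across the request classes: a constructor call plus a setTransactionLogIndex() mutation becomes one S3SecretValue.of(...) call. A hedged sketch of such a factory (the real helper class may differ in fields and accessors):

```java
// Hedged sketch of the static-factory shape these call sites rely on; the
// real S3SecretValue lives in org.apache.hadoop.ozone.om.helpers and may
// differ in detail. The point: construction and transactionLogIndex are
// fixed in one call, so no mutable setter is needed afterwards.
public final class S3SecretValue {
  private final String kerberosId;
  private final String awsSecret;
  private final long transactionLogIndex;

  private S3SecretValue(String kerberosId, String awsSecret, long txIndex) {
    this.kerberosId = kerberosId;
    this.awsSecret = awsSecret;
    this.transactionLogIndex = txIndex;
  }

  public static S3SecretValue of(String kerberosId, String awsSecret) {
    return of(kerberosId, awsSecret, 0L);
  }

  public static S3SecretValue of(String kerberosId, String awsSecret, long txIndex) {
    return new S3SecretValue(kerberosId, awsSecret, txIndex);
  }

  public String getAwsAccessKey() {
    return kerberosId;
  }

  public String getAwsSecret() {
    return awsSecret;
  }

  public long getTransactionLogIndex() {
    return transactionLogIndex;
  }
}
```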
-    s3SecretValue.setTransactionLogIndex(transactionLogIndex);
+    final S3SecretValue s3SecretValue = S3SecretValue.of(accessId, awsSecret, transactionLogIndex);
 
     // Add to tenantAccessIdTable
     final OmDBAccessIdInfo omDBAccessIdInfo = new OmDBAccessIdInfo.Builder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java
index e064812de8ef..808a5ed4c192 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java
@@ -25,7 +25,7 @@
 /**
  * Add reference counter to an object instance.
  */
-public class ReferenceCounted<T, U extends ReferenceCountedCallback>
+public class ReferenceCounted<T, U>
     implements AutoCloseable {
 
   /**
@@ -95,12 +95,6 @@ public long incrementRefCount() {
       long newValTotal = refCount.incrementAndGet();
       Preconditions.checkState(newValTotal > 0L,
           "Total reference count overflown");
-
-      if (refCount.get() == 1L) {
-        // ref count increased to one (from zero), remove from
-        // pendingEvictionList if added
-        parentWithCallback.callback(this);
-      }
     }
 
     return refCount.get();
@@ -131,11 +125,6 @@ public long decrementRefCount() {
       long newValTotal = refCount.decrementAndGet();
       Preconditions.checkState(newValTotal >= 0L,
           "Total reference count underflow");
-
-      if (refCount.get() == 0L) {
-        // ref count decreased to zero, add to pendingEvictionList
-        parentWithCallback.callback(this);
-      }
     }
 
     return refCount.get();
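With the callback hooks gone, ReferenceCounted is reduced to a plain AutoCloseable ref-count holder, so call sites can pair acquisition and release with try-with-resources. A simplified single-parameter sketch, not the Ozone file verbatim:

```java
// Hedged sketch: a ReferenceCounted wrapper whose close() releases one
// reference, so callers can scope get()/release with try-with-resources.
import java.util.concurrent.atomic.AtomicLong;

public class RefCounted<T> implements AutoCloseable {
  private final T obj;
  private final AtomicLong refCount = new AtomicLong();

  public RefCounted(T obj) {
    this.obj = obj;
  }

  public T get() {
    return obj;
  }

  public long incrementRefCount() {
    return refCount.incrementAndGet();
  }

  public long decrementRefCount() {
    long v = refCount.decrementAndGet();
    if (v < 0) {
      throw new IllegalStateException("Total reference count underflow");
    }
    return v;
  }

  @Override
  public void close() {
    // Releasing, not destroying: whoever owns the cache decides when to evict.
    decrementRefCount();
  }
}
```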
-- */
-public interface ReferenceCountedCallback {
-  void callback(ReferenceCounted referenceCounted);
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
index d8932c0e7e0a..226acbb7dd1b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.snapshot;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheLoader;
 import org.apache.hadoop.ozone.om.IOmMetadataReader;
 import org.apache.hadoop.ozone.om.OmSnapshot;
@@ -28,11 +27,8 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.Iterator;
-import java.util.LinkedHashSet;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
@@ -41,7 +37,7 @@
 /**
  * Thread-safe custom unbounded LRU cache to manage open snapshot DB instances.
  */
-public class SnapshotCache implements ReferenceCountedCallback {
+public class SnapshotCache {
 
   static final Logger LOG = LoggerFactory.getLogger(SnapshotCache.class);
 
@@ -49,15 +45,8 @@ public class SnapshotCache implements ReferenceCountedCallback {
   // Key: DB snapshot table key
   // Value: OmSnapshot instance, each holds a DB instance handle inside
   // TODO: [SNAPSHOT] Consider wrapping SoftReference<> around IOmMetadataReader
-  private final ConcurrentHashMap<String,
-      ReferenceCounted<IOmMetadataReader, SnapshotCache>> dbMap;
+  private final ConcurrentHashMap<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>> dbMap;
 
-  // Linked hash set that holds OmSnapshot instances whose reference count
-  // has reached zero. Those entries are eligible to be evicted and closed.
-  // Sorted in last used order.
-  // Least-recently-used entry located at the beginning.
-  private final Set<
-      ReferenceCounted<IOmMetadataReader, SnapshotCache>> pendingEvictionList;
   private final OmSnapshotManager omSnapshotManager;
   private final CacheLoader<String, OmSnapshot> cacheLoader;
   // Soft-limit of the total number of snapshot DB instances allowed to be
@@ -69,25 +58,16 @@ public SnapshotCache(
       CacheLoader<String, OmSnapshot> cacheLoader,
       int cacheSizeLimit) {
     this.dbMap = new ConcurrentHashMap<>();
-    this.pendingEvictionList =
-        Collections.synchronizedSet(new LinkedHashSet<>());
     this.omSnapshotManager = omSnapshotManager;
     this.cacheLoader = cacheLoader;
     this.cacheSizeLimit = cacheSizeLimit;
   }
 
   @VisibleForTesting
-  ConcurrentHashMap<String,
-      ReferenceCounted<IOmMetadataReader, SnapshotCache>> getDbMap() {
+  ConcurrentHashMap<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>> getDbMap() {
     return dbMap;
   }
 
-  @VisibleForTesting
-  Set<ReferenceCounted<IOmMetadataReader, SnapshotCache>> getPendingEvictionList() {
-    return pendingEvictionList;
-  }
-
   /**
    * @return number of DB instances currently held in cache.
    */
@@ -95,41 +75,34 @@ public int size() {
     return dbMap.size();
   }
 
-  public int getPendingEvictionListSize() {
-    return pendingEvictionList.size();
-  }
-
   /**
    * Immediately invalidate an entry.
    * @param key DB snapshot table key
    */
   public void invalidate(String key) throws IOException {
-    ReferenceCounted<IOmMetadataReader, SnapshotCache>
-        rcOmSnapshot = dbMap.get(key);
-    if (rcOmSnapshot != null) {
-      pendingEvictionList.remove(rcOmSnapshot);
-      try {
-        ((OmSnapshot) rcOmSnapshot.get()).close();
-      } catch (IOException e) {
-        throw new IllegalStateException("Failed to close snapshot: " + key, e);
+    dbMap.compute(key, (k, v) -> {
+      if (v == null) {
+        LOG.warn("Key: '{}' does not exist in cache.", k);
+      } else {
+        try {
+          ((OmSnapshot) v.get()).close();
+        } catch (IOException e) {
+          throw new IllegalStateException("Failed to close snapshot: " + key, e);
+        }
       }
-      // Remove the entry from map
-      dbMap.remove(key);
-    }
+      return null;
+    });
   }
 
   /**
    * Immediately invalidate all entries and close their DB instances in cache.
    */
   public void invalidateAll() {
-    Iterator<
-        Map.Entry<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>>>
+    Iterator<Map.Entry<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>>>
         it = dbMap.entrySet().iterator();
 
     while (it.hasNext()) {
-      Map.Entry<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>>
-          entry = it.next();
-      pendingEvictionList.remove(entry.getValue());
+      Map.Entry<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>> entry = it.next();
       OmSnapshot omSnapshot = (OmSnapshot) entry.getValue().get();
       try {
         // TODO: If wrapped with SoftReference<>, omSnapshot could be null?
@@ -152,8 +125,7 @@ public enum Reason {
     GARBAGE_COLLECTION_WRITE
   }
 
-  public ReferenceCounted<IOmMetadataReader, SnapshotCache> get(String key)
-      throws IOException {
+  public ReferenceCounted<IOmMetadataReader, SnapshotCache> get(String key) throws IOException {
     return get(key, false);
   }
 
@@ -163,8 +135,8 @@ public ReferenceCounted get(String key)
    * @param key snapshot table key
    * @return an OmSnapshot instance, or null on error
    */
-  public ReferenceCounted<IOmMetadataReader, SnapshotCache> get(String key,
-      boolean skipActiveCheck) throws IOException {
+  public ReferenceCounted<IOmMetadataReader, SnapshotCache> get(String key, boolean skipActiveCheck)
+      throws IOException {
     // Atomic operation to initialize the OmSnapshot instance (once) if the key
     // does not exist, and increment the reference count on the instance.
     ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
@@ -186,6 +158,10 @@ public ReferenceCounted get(String key,
             throw new IllegalStateException(ex);
           }
         }
+        if (v != null) {
+          // When RC OmSnapshot is successfully loaded
+          v.incrementRefCount();
+        }
         return v;
       });
 
@@ -195,16 +171,12 @@ public ReferenceCounted get(String key,
       throw new OMException("Snapshot table key '" + key + "' not found, "
           + "or the snapshot is no longer active",
           OMException.ResultCodes.FILE_NOT_FOUND);
-    } else {
-      // When RC OmSnapshot is successfully loaded
-      rcOmSnapshot.incrementRefCount();
     }
 
     // If the snapshot is already loaded in cache, the check inside the loader
     // above is ignored. But we would still want to reject all get()s except
     // when called from SDT (and some) if the snapshot is not active anymore.
-    if (!skipActiveCheck &&
-        !omSnapshotManager.isSnapshotStatus(key, SNAPSHOT_ACTIVE)) {
+    if (!skipActiveCheck && !omSnapshotManager.isSnapshotStatus(key, SNAPSHOT_ACTIVE)) {
       // Ref count was incremented. Need to decrement on exception here.
       rcOmSnapshot.decrementRefCount();
       throw new OMException("Unable to load snapshot. " +
@@ -212,11 +184,6 @@ public ReferenceCounted get(String key,
           FILE_NOT_FOUND);
     }
 
-    synchronized (pendingEvictionList) {
-      // Remove instance from clean up list when it exists.
-      pendingEvictionList.remove(rcOmSnapshot);
-    }
-
     // Check if any entries can be cleaned up.
     // At this point, cache size might temporarily exceed cacheSizeLimit
     // even if there are entries that can be evicted, which is fine since it
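The reworked get() moves the reference-count increment inside dbMap.compute(), so lookup, load, and increment all execute under the map's per-key lock instead of being patched up after the fact. A self-contained sketch of the pattern with generic names:

```java
// Hedged sketch of the pattern (generic names, not the Ozone classes):
// ConcurrentHashMap.compute runs its remapping function atomically per key,
// so loading a value and incrementing its reference count cannot interleave
// with another thread's release() or invalidate() on the same key.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class RefCountedValue {
  final String value;
  final AtomicLong refCount = new AtomicLong();

  RefCountedValue(String value) {
    this.value = value;
  }
}

class RefCountingCache {
  private final ConcurrentHashMap<String, RefCountedValue> map = new ConcurrentHashMap<>();

  RefCountedValue acquire(String key) {
    return map.compute(key, (k, v) -> {
      if (v == null) {
        v = new RefCountedValue(load(k)); // load under the per-key lock
      }
      v.refCount.incrementAndGet();       // count the caller before returning
      return v;
    });
  }

  void release(String key) {
    map.compute(key, (k, v) -> {
      if (v == null) {
        throw new IllegalArgumentException("Key '" + k + "' does not exist in cache.");
      }
      v.refCount.decrementAndGet();
      return v; // entry stays; a separate cleanup pass may evict zero-ref entries
    });
  }

  private String load(String key) {
    return "value-for-" + key; // stand-in for opening a snapshot DB
  }
}
```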
@@ -231,21 +198,16 @@
    * @param key snapshot table key
    */
   public void release(String key) {
-    ReferenceCounted<IOmMetadataReader, SnapshotCache>
-        rcOmSnapshot = dbMap.get(key);
-    if (rcOmSnapshot == null) {
-      throw new IllegalArgumentException(
-          "Key '" + key + "' does not exist in cache");
-    }
-
-    if (rcOmSnapshot.decrementRefCount() == 0L) {
-      synchronized (pendingEvictionList) {
-        // v is eligible to be evicted and closed
-        pendingEvictionList.add(rcOmSnapshot);
+    dbMap.compute(key, (k, v) -> {
+      if (v == null) {
+        throw new IllegalArgumentException("Key '" + key + "' does not exist in cache.");
+      } else {
+        v.decrementRefCount();
       }
-    }
+      return v;
+    });
 
-    // The cache size might have already exceed the soft limit
+    // The cache size might have already exceeded the soft limit
     // Thus triggering cleanup() to check and evict if applicable
     cleanup();
   }
@@ -259,33 +221,12 @@ public void release(OmSnapshot omSnapshot) {
     release(snapshotTableKey);
   }
 
-  /**
-   * Callback method used to enqueue or dequeue ReferenceCounted from
-   * pendingEvictionList.
-   * @param referenceCounted ReferenceCounted object
-   */
-  @Override
-  public void callback(ReferenceCounted referenceCounted) {
-    synchronized (pendingEvictionList) {
-      if (referenceCounted.getTotalRefCount() == 0L) {
-        // Reference count reaches zero, add to pendingEvictionList
-        Preconditions.checkState(
-            !pendingEvictionList.contains(referenceCounted),
-            "SnapshotCache is inconsistent. Entry should not be in the "
-                + "pendingEvictionList when ref count just reached zero.");
-        pendingEvictionList.add(referenceCounted);
-      } else if (referenceCounted.getTotalRefCount() == 1L) {
-        pendingEvictionList.remove(referenceCounted);
-      }
-    }
-  }
-
   /**
    * Wrapper for cleanupInternal() that is synchronized to prevent multiple
    * threads from interleaving into the cleanup method.
    */
   private synchronized void cleanup() {
-    synchronized (pendingEvictionList) {
+    if (dbMap.size() > cacheSizeLimit) {
       cleanupInternal();
     }
   }
@@ -296,105 +237,28 @@ private synchronized void cleanup() {
    * TODO: [SNAPSHOT] Add new ozone debug CLI command to trigger this directly.
    */
   private void cleanupInternal() {
-    long numEntriesToEvict = (long) dbMap.size() - cacheSizeLimit;
-    while (numEntriesToEvict > 0L && pendingEvictionList.size() > 0) {
-      // Get the first instance in the clean up list
-      ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
-          pendingEvictionList.iterator().next();
-      OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get();
-      LOG.debug("Evicting OmSnapshot instance {} with table key {}",
-          rcOmSnapshot, omSnapshot.getSnapshotTableKey());
-      // Sanity check
-      Preconditions.checkState(rcOmSnapshot.getTotalRefCount() == 0L,
-          "Illegal state: OmSnapshot reference count non-zero ("
-              + rcOmSnapshot.getTotalRefCount() + ") but shows up in the "
-              + "clean up list");
-
-      final String key = omSnapshot.getSnapshotTableKey();
-      final ReferenceCounted<IOmMetadataReader, SnapshotCache> result =
-          dbMap.remove(key);
-      // Sanity check
-      Preconditions.checkState(rcOmSnapshot == result,
-          "Cache map entry removal failure. The cache is in an inconsistent "
-              + "state. Expected OmSnapshot instance: " + rcOmSnapshot
-              + ", actual: " + result + " for key: " + key);
-
-      pendingEvictionList.remove(result);
-
-      // Close the instance, which also closes its DB handle.
-      try {
-        ((OmSnapshot) rcOmSnapshot.get()).close();
-      } catch (IOException ex) {
-        throw new IllegalStateException("Error while closing snapshot DB", ex);
-      }
-
-      --numEntriesToEvict;
-    }
-
-    // Print warning message if actual cache size is exceeding the soft limit
-    // even after the cleanup procedure above.
-    if ((long) dbMap.size() > cacheSizeLimit) {
-      LOG.warn("Current snapshot cache size ({}) is exceeding configured "
-          + "soft-limit ({}) after possible evictions.",
-          dbMap.size(), cacheSizeLimit);
-
-      Preconditions.checkState(pendingEvictionList.size() == 0);
-    }
-  }
-
-  /**
-   * Check cache consistency.
-   * @return true if the cache internal structure is consistent to the best of
-   * its knowledge, false if found to be inconsistent and details logged.
-   */
-  @VisibleForTesting
-  public boolean isConsistent() {
-    // Uses dbMap as the source of truth for this check, whether dbMap entries
-    // are in OM DB's snapshotInfoTable is out of the scope of this check.
-
-    LOG.info("dbMap has {} entries", dbMap.size());
-    LOG.info("pendingEvictionList has {} entries",
-        pendingEvictionList.size());
-
-    // pendingEvictionList must be empty if cache size exceeds limit
-    if (dbMap.size() > cacheSizeLimit) {
-      if (pendingEvictionList.size() != 0) {
-        // cleanup() is not functioning correctly
-        LOG.error("pendingEvictionList is not empty even when cache size"
-            + "exceeds limit");
-      }
-    }
-
-    dbMap.forEach((k, v) -> {
-      if (v.getTotalRefCount() == 0L) {
-        long threadRefCount = v.getCurrentThreadRefCount();
-        if (threadRefCount != 0L) {
-          LOG.error("snapshotTableKey='{}' instance has inconsistent "
-              + "ref count. Total ref count is 0 but thread "
-              + "ref count is {}", k, threadRefCount);
-        }
-        // Zero ref count values in dbMap must be in pendingEvictionList
-        if (!pendingEvictionList.contains(v)) {
-          LOG.error("snapshotTableKey='{}' instance has zero ref count but "
-              + "not in pendingEvictionList", k);
+    for (Map.Entry<String, ReferenceCounted<IOmMetadataReader, SnapshotCache>> entry : dbMap.entrySet()) {
+      dbMap.compute(entry.getKey(), (k, v) -> {
+        if (v == null) {
+          throw new IllegalStateException("Key '" + k + "' does not exist in cache. The RocksDB "
+              + "instance of the Snapshot may not be closed properly.");
         }
-      }
-    });
-    pendingEvictionList.forEach(v -> {
-      // Objects in pendingEvictionList should still be in dbMap
-      if (!dbMap.contains(v)) {
-        LOG.error("Instance '{}' is in pendingEvictionList but not in "
-            + "dbMap", v);
-      }
-      // Instances in pendingEvictionList must have ref count equals 0
-      if (v.getTotalRefCount() != 0L) {
-        LOG.error("Instance '{}' is in pendingEvictionList but ref count "
-            + "is not zero", v);
-      }
-    });
-
-    return true;
+        if (v.getTotalRefCount() > 0) {
+          LOG.debug("Snapshot {} is still being referenced ({}), skipping its clean up",
+              k, v.getTotalRefCount());
+          return v;
+        } else {
+          LOG.debug("Closing Snapshot {}. It is not being referenced anymore.", k);
+          // Close the instance, which also closes its DB handle.
+          try {
+            ((OmSnapshot) v.get()).close();
+          } catch (IOException ex) {
+            throw new IllegalStateException("Error while closing snapshot DB", ex);
+          }
+          return null;
+        }
+      });
+    }
   }
-
 }
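cleanupInternal() no longer walks a pendingEvictionList; it sweeps the map itself and closes whatever has a zero reference count, which makes dbMap the single source of truth (and is why the isConsistent() self-check could be deleted outright). A sketch of such a sweep, with an AtomicLong standing in for the ref-counted snapshot handle:

```java
// Hedged sketch of the eviction sweep (names illustrative; the real cache
// closes a snapshot DB on removal): once the soft limit is exceeded, every
// entry with zero references is dropped in one pass.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

final class EvictionSweep {
  static void sweep(ConcurrentHashMap<String, AtomicLong> refCounts, int softLimit) {
    if (refCounts.size() <= softLimit) {
      return; // under the limit: zero-ref entries may linger until the next sweep
    }
    for (Map.Entry<String, AtomicLong> entry : refCounts.entrySet()) {
      // compute() removes atomically, so a concurrent acquire on the same
      // key cannot interleave with the removal decision.
      refCounts.compute(entry.getKey(), (k, v) ->
          (v == null || v.get() == 0L) ? null : v);
    }
  }
}
```

Dropping the side list trades eager LRU ordering for simplicity: eviction is now lazy and unordered, but there is no second data structure to keep consistent with the map.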
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
index fa3ac8609656..b2510ce76790 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
@@ -101,7 +101,6 @@ void testGet() throws IOException {
     assertNotNull(omSnapshot.get());
     assertTrue(omSnapshot.get() instanceof OmSnapshot);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -112,7 +111,6 @@ void testGetTwice() throws IOException {
         snapshotCache.get(dbKey1);
     assertNotNull(omSnapshot1);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     ReferenceCounted<IOmMetadataReader, SnapshotCache> omSnapshot1again =
         snapshotCache.get(dbKey1);
@@ -120,7 +118,6 @@ void testGetTwice() throws IOException {
     assertNotNull(omSnapshot1again);
     assertEquals(omSnapshot1, omSnapshot1again);
     assertEquals(omSnapshot1.get(), omSnapshot1again.get());
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -132,15 +129,10 @@ void testReleaseByDbKey() throws IOException {
     assertNotNull(omSnapshot1);
     assertNotNull(omSnapshot1.get());
     assertEquals(1, snapshotCache.size());
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey1);
     // Entry will not be immediately evicted
     assertEquals(1, snapshotCache.size());
-    // Entry is queued for eviction as its ref count reaches zero
-    assertEquals(1, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -151,15 +143,10 @@ void testReleaseByOmSnapshotInstance() throws IOException {
         snapshotCache.get(dbKey1);
     assertNotNull(omSnapshot1);
     assertEquals(1, snapshotCache.size());
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release((OmSnapshot) omSnapshot1.get());
     // Entry will not be immediately evicted
     assertEquals(1, snapshotCache.size());
-    // Entry is queued for eviction as its ref count reaches zero
-    assertEquals(1, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -170,16 +157,13 @@ void testInvalidate() throws IOException {
         snapshotCache.get(dbKey1);
     assertNotNull(omSnapshot);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey1);
     // Entry will not be immediately evicted
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.invalidate(dbKey1);
     assertEquals(0, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -190,7 +174,6 @@ void testInvalidateAll() throws IOException {
         snapshotCache.get(dbKey1);
     assertNotNull(omSnapshot1);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey2 = "dbKey2";
     ReferenceCounted<IOmMetadataReader, SnapshotCache> omSnapshot2 =
@@ -199,31 +182,22 @@ void testInvalidateAll() throws IOException {
         snapshotCache.get(dbKey2);
     assertNotNull(omSnapshot2);
     assertEquals(2, snapshotCache.size());
     // Should be difference omSnapshot instances
     assertNotEquals(omSnapshot1, omSnapshot2);
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey3 = "dbKey3";
     ReferenceCounted<IOmMetadataReader, SnapshotCache> omSnapshot3 =
         snapshotCache.get(dbKey3);
     assertNotNull(omSnapshot3);
     assertEquals(3, snapshotCache.size());
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey1);
     // Entry will not be immediately evicted
     assertEquals(3, snapshotCache.size());
-    assertEquals(1, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.invalidate(dbKey1);
     assertEquals(2, snapshotCache.size());
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.invalidateAll();
     assertEquals(0, snapshotCache.size());
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   private void assertEntryExistence(String key, boolean shouldExist) {
@@ -247,39 +221,27 @@ void testEviction1() throws IOException {
     final String dbKey1 = "dbKey1";
     snapshotCache.get(dbKey1);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey1);
     assertEquals(1, snapshotCache.size());
-    assertEquals(1, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey2 = "dbKey2";
     snapshotCache.get(dbKey2);
     assertEquals(2, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey2);
     assertEquals(2, snapshotCache.size());
-    assertEquals(2, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey3 = "dbKey3";
     snapshotCache.get(dbKey3);
     assertEquals(3, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     snapshotCache.release(dbKey3);
     assertEquals(3, snapshotCache.size());
-    assertEquals(3, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey4 = "dbKey4";
     snapshotCache.get(dbKey4);
-    // dbKey1 would have been evicted by the end of the last get() because
-    // it was release()d.
-    assertEquals(3, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
+    // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last get() because
+    // those were release()d.
+    assertEquals(1, snapshotCache.size());
     assertEntryExistence(dbKey1, false);
-    assertEquals(2, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -289,25 +251,20 @@ void testEviction2() throws IOException {
     final String dbKey1 = "dbKey1";
     snapshotCache.get(dbKey1);
     assertEquals(1, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey2 = "dbKey2";
     snapshotCache.get(dbKey2);
     assertEquals(2, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey3 = "dbKey3";
     snapshotCache.get(dbKey3);
     assertEquals(3, snapshotCache.size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey4 = "dbKey4";
     snapshotCache.get(dbKey4);
     // dbKey1 would not have been evicted because it is not release()d
     assertEquals(4, snapshotCache.size());
     assertEntryExistence(dbKey1, true);
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     // Releasing dbKey2 at this point should immediately trigger its eviction
     // because the cache size exceeded the soft limit
@@ -315,8 +272,6 @@ void testEviction2() throws IOException {
     assertEquals(3, snapshotCache.size());
     assertEntryExistence(dbKey2, false);
     assertEntryExistence(dbKey1, true);
-    assertEquals(0, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
   }
 
   @Test
@@ -324,71 +279,44 @@ void testEviction2() throws IOException {
   void testEviction3WithClose() throws IOException {
     final String dbKey1 = "dbKey1";
-    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
-        snapshotCache.get(dbKey1)) {
+    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot = snapshotCache.get(dbKey1)) {
       assertEquals(1L, rcOmSnapshot.getTotalRefCount());
       assertEquals(1, snapshotCache.size());
-      assertEquals(0, snapshotCache.getPendingEvictionList().size());
-      assertTrue(snapshotCache.isConsistent());
     }
     // ref count should have been decreased because it would be close()d
     // upon exiting try-with-resources.
     assertEquals(0L, snapshotCache.getDbMap().get(dbKey1).getTotalRefCount());
     assertEquals(1, snapshotCache.size());
-    assertEquals(1, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey2 = "dbKey2";
-    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
-        snapshotCache.get(dbKey2)) {
+    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot = snapshotCache.get(dbKey2)) {
       assertEquals(1L, rcOmSnapshot.getTotalRefCount());
       assertEquals(2, snapshotCache.size());
-      assertEquals(1, snapshotCache.getPendingEvictionList().size());
-      assertTrue(snapshotCache.isConsistent());
 
       // Get dbKey2 entry a second time
-      try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot2 =
-          snapshotCache.get(dbKey2)) {
+      try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot2 = snapshotCache.get(dbKey2)) {
         assertEquals(2L, rcOmSnapshot.getTotalRefCount());
         assertEquals(2L, rcOmSnapshot2.getTotalRefCount());
         assertEquals(2, snapshotCache.size());
-        assertEquals(1, snapshotCache.getPendingEvictionList().size());
-        assertTrue(snapshotCache.isConsistent());
       }
       assertEquals(1L, rcOmSnapshot.getTotalRefCount());
-      assertTrue(snapshotCache.isConsistent());
     }
     assertEquals(0L, snapshotCache.getDbMap().get(dbKey2).getTotalRefCount());
     assertEquals(2, snapshotCache.size());
-    assertEquals(2, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey3 = "dbKey3";
-    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
-        snapshotCache.get(dbKey3)) {
+    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot = snapshotCache.get(dbKey3)) {
       assertEquals(1L, rcOmSnapshot.getTotalRefCount());
       assertEquals(3, snapshotCache.size());
-      assertEquals(2, snapshotCache.getPendingEvictionList().size());
-      assertTrue(snapshotCache.isConsistent());
     }
     assertEquals(0L, snapshotCache.getDbMap().get(dbKey3).getTotalRefCount());
     assertEquals(3, snapshotCache.size());
-    assertEquals(3, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
 
     final String dbKey4 = "dbKey4";
-    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot =
-        snapshotCache.get(dbKey4)) {
+    try (ReferenceCounted<IOmMetadataReader, SnapshotCache> rcOmSnapshot = snapshotCache.get(dbKey4)) {
       assertEquals(1L, rcOmSnapshot.getTotalRefCount());
-      assertEquals(3, snapshotCache.size());
-      // An entry has been evicted at this point
-      assertEquals(2, snapshotCache.getPendingEvictionList().size());
-      assertTrue(snapshotCache.isConsistent());
+      assertEquals(1, snapshotCache.size());
     }
     assertEquals(0L, snapshotCache.getDbMap().get(dbKey4).getTotalRefCount());
-    // Reached cache size limit
-    assertEquals(3, snapshotCache.size());
-    assertEquals(3, snapshotCache.getPendingEvictionList().size());
-    assertTrue(snapshotCache.isConsistent());
+    assertEquals(1, snapshotCache.size());
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
index de00db61170f..4f6ac5341d8b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
@@ -103,9 +103,9 @@ public void setUp() throws Exception {
     serviceRpcAdd = new Text("localhost");
     final Map<String, S3SecretValue> s3Secrets = new HashMap<>();
     s3Secrets.put("testuser1",
-        new S3SecretValue("testuser1", "dbaksbzljandlkandlsd"));
+        S3SecretValue.of("testuser1", "dbaksbzljandlkandlsd"));
     s3Secrets.put("abc",
S3SecretValue("abc", "djakjahkd")); + S3SecretValue.of("abc", "djakjahkd")); om = Mockito.mock(OzoneManager.class); OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf, om); Mockito.when(om.getMetadataManager()).thenReturn(metadataManager); diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index b084cbd653f3..370401b6dc69 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -275,11 +275,6 @@ test-jar ${hdds.version} - - org.bouncycastle - bcprov-jdk15on - ${bouncycastle.version} - org.apache.ozone hdds-rocks-native diff --git a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java index 892b86eaa7e4..c9bb4d6435ea 100644 --- a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java +++ b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java @@ -115,7 +115,7 @@ public S3SecretValue getSecret(String kerberosID) throws IOException { return null; } - return new S3SecretValue(kerberosID, s3Secret); + return S3SecretValue.of(kerberosID, s3Secret); } catch (VaultException e) { LOG.error("Failed to read secret", e); throw new IOException("Failed to read secret", e); diff --git a/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java b/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java index 4700a283cd49..082a7da3f270 100644 --- a/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java +++ b/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java @@ -89,7 +89,7 @@ public void clean() { @Test public void testReadWrite() throws IOException { SUCCESS_OPERATION_LIMIT.set(2); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret( "id", secret); @@ -101,7 +101,7 @@ public void testReadWrite() throws IOException { public void testReAuth() throws IOException { SUCCESS_OPERATION_LIMIT.set(1); AUTH_OPERATION_PROVIDER.set(1); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret("id", secret); assertEquals(secret, s3SecretStore.getSecret("id")); @@ -112,7 +112,7 @@ public void testReAuth() throws IOException { @Test public void testAuthFail() throws IOException { SUCCESS_OPERATION_LIMIT.set(1); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret("id", secret); assertThrows(IOException.class, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 3c932da57d77..a86a92820c06 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -32,6 +32,7 @@ import javax.ws.rs.core.Response; import java.io.IOException; +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static 
 import static javax.ws.rs.core.Response.Status.NOT_FOUND;
 
 /**
@@ -55,15 +56,26 @@ public Response generate(@PathParam("username") String username)
     return generateInternal(username);
   }
 
-  private Response generateInternal(@Nullable String username)
-      throws IOException {
-    S3SecretResponse s3SecretResponse = new S3SecretResponse();
-    S3SecretValue s3SecretValue = generateS3Secret(username);
-    s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret());
-    s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey());
-    AUDIT.logReadSuccess(buildAuditMessageForSuccess(
-        S3GAction.GENERATE_SECRET, getAuditParameters()));
-    return Response.ok(s3SecretResponse).build();
+  private Response generateInternal(@Nullable String username) throws IOException {
+    try {
+      S3SecretValue s3SecretValue = generateS3Secret(username);
+
+      S3SecretResponse s3SecretResponse = new S3SecretResponse();
+      s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret());
+      s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey());
+      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+          S3GAction.GENERATE_SECRET, getAuditParameters()));
+      return Response.ok(s3SecretResponse).build();
+    } catch (OMException e) {
+      AUDIT.logWriteFailure(buildAuditMessageForFailure(
+          S3GAction.GENERATE_SECRET, getAuditParameters(), e));
+      if (e.getResult() == OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS) {
+        return Response.status(BAD_REQUEST.getStatusCode(), e.getResult().toString()).build();
+      } else {
+        LOG.error("Can't execute get secret request: ", e);
+        return Response.serverError().build();
+      }
+    }
   }
 
   private S3SecretValue generateS3Secret(@Nullable String username)
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 174af69e255d..961bda6f921a 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -379,7 +379,7 @@ public void cancelDelegationToken(Token token)
   @Override
   @Nonnull
   public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    return new S3SecretValue(STUB_KERBEROS_ID, STUB_SECRET);
+    return S3SecretValue.of(STUB_KERBEROS_ID, STUB_SECRET);
   }
 
   @Override
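generateInternal() now traps the domain exception at the resource boundary, audits the failure, and maps S3_SECRET_ALREADY_EXISTS to a 400 while everything else becomes a 500. The pattern in isolation, with simplified stand-in types (Response.status(int, String) is the real JAX-RS call; the rest is assumed):

```java
// Hedged sketch of the error-mapping pattern the endpoint adopts; DomainException
// and its result code stand in for Ozone's OMException/ResultCodes.
import javax.ws.rs.core.Response;
import static javax.ws.rs.core.Response.Status.BAD_REQUEST;

class SecretEndpoint {
  Response generate(String username) {
    try {
      return Response.ok(createSecretFor(username)).build();
    } catch (DomainException e) {
      if (e.isAlreadyExists()) {
        // Client error: surface the result code as the HTTP reason phrase.
        return Response.status(BAD_REQUEST.getStatusCode(), e.code()).build();
      }
      return Response.serverError().build(); // unknown failure: plain 500
    }
  }

  private Object createSecretFor(String username) throws DomainException {
    throw new DomainException("S3_SECRET_ALREADY_EXISTS"); // stand-in behavior
  }

  static final class DomainException extends Exception {
    private final String code;

    DomainException(String code) {
      super(code);
      this.code = code;
    }

    boolean isAlreadyExists() {
      return "S3_SECRET_ALREADY_EXISTS".equals(code);
    }

    String code() {
      return code;
    }
  }
}
```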
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
index f3c17d5807ef..007fa9099ee3 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java
@@ -22,6 +22,7 @@
 import java.security.Principal;
 import javax.ws.rs.container.ContainerRequestContext;
 import javax.ws.rs.core.MultivaluedHashMap;
+import javax.ws.rs.core.Response;
 import javax.ws.rs.core.SecurityContext;
 import javax.ws.rs.core.UriInfo;
 
@@ -30,6 +31,7 @@
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -38,15 +40,16 @@
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.junit.jupiter.MockitoExtension;
 
+import static javax.ws.rs.core.Response.Status.BAD_REQUEST;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.notNull;
 import static org.mockito.Mockito.when;
 
 /**
  * Test for S3 secret generate endpoint.
  */
 @ExtendWith(MockitoExtension.class)
-public class TestSecretGenerate {
+class TestSecretGenerate {
   private static final String USER_NAME = "test";
   private static final String OTHER_USER_NAME = "test2";
   private static final String USER_SECRET = "test_secret";
@@ -66,12 +69,11 @@ public class TestSecretGenerate {
 
   private static S3SecretValue getS3SecretValue(InvocationOnMock invocation) {
     Object[] args = invocation.getArguments();
-    return new S3SecretValue((String) args[0], USER_SECRET);
+    return S3SecretValue.of((String) args[0], USER_SECRET);
   }
 
   @BeforeEach
-  void setUp() throws IOException {
-    when(proxy.getS3Secret(any())).then(TestSecretGenerate::getS3SecretValue);
+  void setUp() {
     OzoneConfiguration conf = new OzoneConfiguration();
     OzoneClient client =
         new OzoneClientStub(new ObjectStoreStub(conf, proxy));
@@ -86,21 +88,51 @@ void setUp() throws IOException {
 
   @Test
   void testSecretGenerate() throws IOException {
-    when(principal.getName()).thenReturn(USER_NAME);
-    when(securityContext.getUserPrincipal()).thenReturn(principal);
-    when(context.getSecurityContext()).thenReturn(securityContext);
+    setupSecurityContext();
+    hasNoSecretYet();
 
     S3SecretResponse response =
         (S3SecretResponse) endpoint.generate().getEntity();
+
     assertEquals(USER_SECRET, response.getAwsSecret());
     assertEquals(USER_NAME, response.getAwsAccessKey());
   }
 
+  @Test
+  void testIfSecretAlreadyExists() throws IOException {
+    setupSecurityContext();
+    hasSecretAlready();
+
+    Response response = endpoint.generate();
+
+    assertEquals(BAD_REQUEST.getStatusCode(), response.getStatus());
+    assertEquals(OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS.toString(),
+        response.getStatusInfo().getReasonPhrase());
+  }
+
   @Test
   void testSecretGenerateWithUsername() throws IOException {
+    hasNoSecretYet();
+
     S3SecretResponse response =
         (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity();
     assertEquals(USER_SECRET, response.getAwsSecret());
     assertEquals(OTHER_USER_NAME, response.getAwsAccessKey());
   }
+
+  private void setupSecurityContext() {
+    when(principal.getName()).thenReturn(USER_NAME);
+    when(securityContext.getUserPrincipal()).thenReturn(principal);
+    when(context.getSecurityContext()).thenReturn(securityContext);
+  }
+
+  private void hasNoSecretYet() throws IOException {
+    when(proxy.getS3Secret(notNull()))
+        .then(TestSecretGenerate::getS3SecretValue);
+  }
+
+  private void hasSecretAlready() throws IOException {
+    when(proxy.getS3Secret(notNull()))
+        .thenThrow(new OMException("Secret already exists", OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS));
+  }
 }
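The test moves its stubbing into intent-revealing helpers and tightens any() to notNull(), so an accidental null accessId fails the match instead of silently hitting the stub. A hedged, self-contained sketch of the same Mockito pattern:

```java
// Hedged sketch of the stubbing pattern (simplified types, not the Ozone
// ClientProtocol): per-scenario helpers configure the mock, and notNull()
// rejects null arguments at match time.
import static org.mockito.ArgumentMatchers.notNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class SecretClientStubbing {
  interface SecretClient {
    String getS3Secret(String accessId) throws Exception;
  }

  private final SecretClient proxy = mock(SecretClient.class);

  // Scenario: first request for this user, echo a derived secret back.
  void hasNoSecretYet() throws Exception {
    when(proxy.getS3Secret(notNull()))
        .thenAnswer(inv -> "secret-for-" + inv.getArgument(0));
  }

  // Scenario: secret exists, so the call fails the way the OM would.
  void hasSecretAlready() throws Exception {
    when(proxy.getS3Secret(notNull()))
        .thenThrow(new IllegalStateException("Secret already exists"));
  }
}
```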
diff --git a/pom.xml b/pom.xml
index d2497a434948..591b69011836 100644
--- a/pom.xml
+++ b/pom.xml
@@ -116,18 +116,18 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     1.6.0
     1.15
     3.2.2
-    1.21
-    2.1.1
+    1.26.0
+    2.8.0
     1.5.2-5
     1.0.13
     2.11.0
-    3.7
+    3.14.0
     1.2
     1.1
     3.1.1
     3.10.0
     2.6.0
-    1.10.0
+    1.11.0
     1.6
     1.5
     1.7.1
@@ -141,7 +141,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.12.2
     5.0.4
     0.8.0.RELEASE
-    1.67
+    1.77
     3.2.0
     10.14.2.0
     3.0.2
@@ -288,7 +288,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.1.0
     9.3
     1200
-    1.12.261
+    1.12.661
     1.12.0
@@ -1410,9 +1410,18 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
       <dependency>
         <groupId>org.bouncycastle</groupId>
-        <artifactId>bcprov-jdk16</artifactId>
+        <artifactId>bcprov-jdk18on</artifactId>
+        <version>${bouncycastle.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.bouncycastle</groupId>
+        <artifactId>bcpkix-jdk18on</artifactId>
+        <version>${bouncycastle.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.bouncycastle</groupId>
+        <artifactId>bcutil-jdk18on</artifactId>
         <version>${bouncycastle.version}</version>
-        <scope>test</scope>
       </dependency>
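The bcprov-jdk16/bcprov-jdk15on to bcprov-jdk18on move (plus the new bcpkix and bcutil entries) is a coordinate change: to my knowledge the jdk18on artifacts keep the org.bouncycastle package namespace, so provider-registration code like the following sketch compiles unchanged after the bump; only the Maven artifactId and version move.

```java
// Hedged illustration, not part of the patch: standard JCA registration of
// the BouncyCastle provider, identical under jdk15on and jdk18on artifacts.
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public final class BouncyCastleBootstrap {
  private BouncyCastleBootstrap() {
  }

  public static void register() {
    // Idempotent: skip if a provider with the same name is already installed.
    if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
      Security.addProvider(new BouncyCastleProvider());
    }
  }
}
```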