diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5a028ccf7ce..4b4c37f399b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,6 +21,13 @@ updates: interval: "weekly" commit-message: prefix: "[Recon] Dependabot Package Upgrade: " + groups: + minor-updates: + patterns: + - "*" + update-types: + - "minor" + - "patch" ignore: - dependency-name: "*" update-types: ["version-update:semver-major"] @@ -31,7 +38,5 @@ updates: directory: "/" schedule: interval: "weekly" - commit-message: - prefix: "[Java] Dependabot Package Upgrade: " pull-request-branch-name: - separator: "-" \ No newline at end of file + separator: "-" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1415a43abf1..74c4756cfd0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,11 +38,11 @@ jobs: needs-kubernetes-tests: ${{ steps.selective-checks.outputs.needs-kubernetes-tests }} steps: - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )" - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: persist-credentials: false - name: Fetch incoming commit ${{ github.sha }} with its parent - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.sha }} fetch-depth: 2 @@ -81,9 +81,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for npm dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.pnpm-store @@ -92,7 +92,7 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.m2/repository @@ -101,7 +101,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} @@ -110,7 +110,7 @@ jobs: env: GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Store binaries for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-bin path: | @@ -118,13 +118,13 @@ jobs: !hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - name: Store source tarball for compilation - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-src path: hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - name: Store Maven repo for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-repo path: | @@ -135,16 +135,20 @@ jobs: - build-info - build - basic - runs-on: ubuntu-20.04 - timeout-minutes: 30 + timeout-minutes: 45 if: needs.build-info.outputs.needs-compile == 'true' strategy: matrix: java: [ 11, 17, 21 ] + include: + - os: ubuntu-20.04 + - java: 8 + os: macos-12 fail-fast: false + runs-on: ${{ matrix.os }} steps: - name: Download Ozone source tarball - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-src - name: Untar sources @@ -157,7 +161,7 @@ jobs: git config user.email 'noreply@github.com' git commit --allow-empty -a -m 'workaround for HADOOP-19011' - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -166,7 +170,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} @@ -187,15 +191,15 @@ jobs: fail-fast: false steps: - name: Checkout project - 
uses: actions/checkout@v3 + uses: actions/checkout@v4 if: matrix.check != 'bats' - name: Checkout project with history - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 if: matrix.check == 'bats' - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -205,7 +209,7 @@ jobs: maven-repo- if: ${{ !contains('author,bats,docs', matrix.check) }} - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -218,7 +222,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: name: ${{ matrix.check }} @@ -237,9 +241,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -248,7 +252,7 @@ jobs: restore-keys: | maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -261,7 +265,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} with: name: ${{ matrix.check }} @@ -276,9 +280,9 @@ jobs: if: needs.build-info.outputs.needs-dependency-check == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -290,7 +294,7 @@ jobs: export OZONE_DIST_DIR=`pwd`/dist ./hadoop-ozone/dev-support/checks/dependency.sh - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: dependency @@ -305,9 +309,9 @@ jobs: if: needs.build-info.outputs.needs-dependency-check == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -317,7 +321,7 @@ jobs: maven-repo- - name: Download Ozone repo id: download-ozone-repo - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-repo path: | @@ -330,7 +334,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: ${{ github.job }} @@ -350,9 +354,9 @@ jobs: fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -376,7 +380,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: acceptance-${{ matrix.suite }} 
@@ -392,9 +396,9 @@ jobs: if: needs.build-info.outputs.needs-kubernetes-tests == 'true' steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -412,7 +416,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: kubernetes @@ -435,15 +439,14 @@ jobs: - hdds - om - ozone - - scm - shell - flaky fail-fast: false steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -453,14 +456,14 @@ jobs: maven-repo- - name: Download Ozone repo id: download-ozone-repo - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-repo path: | ~/.m2/repository/org/apache/ozone continue-on-error: true - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 @@ -483,7 +486,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: it-${{ matrix.profile }} @@ -499,11 +502,11 @@ jobs: - integration steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Cache for maven dependencies - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: | ~/.m2/repository @@ -512,7 +515,7 @@ jobs: restore-keys: | maven-repo- - name: Download artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: path: target/artifacts - name: Untar binaries @@ -522,7 +525,7 @@ jobs: - name: Calculate combined coverage run: ./hadoop-ozone/dev-support/checks/coverage.sh - name: Setup java 17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 17 @@ -533,7 +536,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: coverage path: target/coverage diff --git a/.github/workflows/close-pending.yaml b/.github/workflows/close-pending.yaml index 2a7cec99215..ce18152407d 100644 --- a/.github/workflows/close-pending.yaml +++ b/.github/workflows/close-pending.yaml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Execute close-pending script if: github.repository == 'apache/ozone' run: ./.github/close-pending.sh diff --git a/.github/workflows/comments.yaml b/.github/workflows/comments.yaml index 4ffcc061d78..8f7d41c2bda 100644 --- a/.github/workflows/comments.yaml +++ b/.github/workflows/comments.yaml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Execute process-comment script run: ./.github/process-comment.sh env: diff --git a/.github/workflows/dependabot-ci.yml b/.github/workflows/dependabot-ci.yml index 
dc02ff72cda..aa216d1c58a 100644 --- a/.github/workflows/dependabot-ci.yml +++ b/.github/workflows/dependabot-ci.yml @@ -49,7 +49,7 @@ jobs: #Delete the lockfile created by dependabot rm -rf hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml - name: Install NodeJS v${{ env.NODE_VERSION }} - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: ${{ env.NODE_VERSION }} - name: Install pnpm v${{ env.PNPM_VERSION }} and recreate lockfile @@ -75,4 +75,4 @@ jobs: git config --global user.name 'Github Actions' git config --global user.email 'noreply@github.com' git commit -m "[auto] Generated pnpm-lock from actions for $OZONE_SHA" || true - git push origin HEAD:${{ steps.get_branch_name.outputs.branch_name }} \ No newline at end of file + git push origin HEAD:${{ steps.get_branch_name.outputs.branch_name }} diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index d686ec41aaa..3239215aa90 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -34,7 +34,7 @@ on: required: true splits: description: Number of splits - default: 2 + default: 10 required: true fail-fast: description: Stop after first failure @@ -52,48 +52,55 @@ jobs: runs-on: ubuntu-20.04 outputs: matrix: ${{steps.generate.outputs.matrix}} - test_type: ${{steps.check-test-existence.outputs.test_type}} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - - name: Check for Test File - id: check-test-existence - run: | - filename="$TEST_CLASS.java" - found_file=$(find . -name "$filename" -type f -print -quit) - test_type=unit - if [ -n "$found_file" ]; then - echo "File path : $found_file" - if [[ "$found_file" == *"integration-test"* ]]; then - test_type=integration - fi - if [ "$TEST_METHOD" != "ALL" ]; then - if grep -q "public void $TEST_METHOD(" "$found_file"; then - echo "Test method $TEST_METHOD exists in $filename" - else - echo "Test method $TEST_METHOD does not exist in $filename.Stopping!" - exit 1 - fi - fi - echo "Test file $filename found. Continuing.." - else - echo "Test file $filename not found.Stopping!" 
- exit 1 - fi - echo "test_type=$test_type" >> $GITHUB_OUTPUT - id: generate name: Generate test matrix run: | splits=() for ((i = 1; i <= ${{ github.event.inputs.splits }}; i++)); do - splits+=("$i") + splits+=("$i") done printf -v x "%s," "${splits[@]}" split_matrix="[${x%,}]" echo "matrix=$split_matrix" >> $GITHUB_OUTPUT + build: + needs: + - prepare-job + runs-on: ubuntu-20.04 + timeout-minutes: 60 + steps: + - name: Checkout project + uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache@v4 + with: + path: | + ~/.m2/repository + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- + - name: Setup java + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: 8 + - name: Build (most) of Ozone + run: hadoop-ozone/dev-support/checks/build.sh -Dskip.npx -Dskip.installnpx -DskipShade + - name: Store Maven repo for tests + uses: actions/upload-artifact@v4 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + retention-days: 1 run-test: - needs: prepare-job + needs: + - prepare-job + - build name: Run-Split runs-on: ubuntu-20.04 strategy: @@ -101,63 +108,66 @@ jobs: split: ${{fromJson(needs.prepare-job.outputs.matrix)}} # Define splits fail-fast: ${{ fromJson(github.event.inputs.fail-fast) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache/restore@v4 with: - path: ~/.m2/repository - key: maven-repo-${{ hashFiles('**/pom.xml') }}-8-single + path: | + ~/.m2/repository + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | - maven-repo-${{ hashFiles('**/pom.xml') }}-8 - maven-repo-${{ hashFiles('**/pom.xml') }} maven-repo- + - name: Download Ozone repo + id: download-ozone-repo + uses: actions/download-artifact@v4 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + continue-on-error: true - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 - name: Execute tests run: | - test_type=${{ needs.prepare-job.outputs.test_type }} - args="-DexcludedGroups=unhealthy" - if [ "$test_type" = "integration" ]; then - args="$args -pl :ozone-integration-test,:mini-chaos-tests" + if [[ -e "${{ steps.download-ozone-repo.outputs.download-path }}" ]]; then + export OZONE_REPO_CACHED=true fi + + args="-DexcludedGroups=native|slow|unhealthy" if [ "$TEST_METHOD" = "ALL" ]; then - echo "Running all tests from $TEST_CLASS" - hadoop-ozone/dev-support/checks/junit.sh $args -Dtest=$TEST_CLASS + echo "Running all tests from $TEST_CLASS" + set -x + hadoop-ozone/dev-support/checks/junit.sh $args -Dtest="$TEST_CLASS,Abstract*Test*\$*" else - echo "Running test: $TEST_METHOD from $TEST_CLASS" - hadoop-ozone/dev-support/checks/junit.sh $args -Dtest=$TEST_CLASS#$TEST_METHOD + echo "Running test: $TEST_METHOD from $TEST_CLASS" + set -x + hadoop-ozone/dev-support/checks/junit.sh $args -Dtest="$TEST_CLASS#$TEST_METHOD,Abstract*Test*\$*" fi continue-on-error: true env: - CHECK: ${{ needs.prepare-job.outputs.test_type }} GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures - run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ needs.prepare-job.outputs.test_type }}/summary.txt + run: hadoop-ozone/dev-support/checks/_summary.sh target/unit/summary.txt if: ${{ !cancelled() }} 
- name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: - name: result-${{ env.TEST_CLASS }}-split-${{ matrix.split }} - path: target/${{ needs.prepare-job.outputs.test_type }} - - name: Delete temporary build artifacts before caching - run: | - #Never cache local artifacts - rm -rf ~/.m2/repository/org/apache/ozone/hdds* - rm -rf ~/.m2/repository/org/apache/ozone/ozone* - if: always() + name: result-${{ github.run_id }}-${{ github.run_number }}-${{ matrix.split }} + path: target/unit count-failures: if: ${{ always() }} needs: run-test runs-on: ubuntu-20.04 steps: - name: Download build results - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Count failures run: | failures=$(find . -name 'summary.txt' | grep -v 'iteration' | xargs grep -v 'exit code: 0' | wc -l) diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index f37e680a5f5..53ba44e0f2f 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check pull request title env: TITLE: ${{ github.event.pull_request.title }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index e19cc2cd267..7269a9c417a 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -55,7 +55,7 @@ jobs: outputs: matrix: ${{steps.generate.outputs.matrix}} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Verify Test Filter @@ -80,9 +80,9 @@ jobs: timeout-minutes: 60 steps: - name: Checkout project - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Cache for npm dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: | ~/.pnpm-store @@ -91,7 +91,7 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.m2/repository key: maven-repo-${{ hashFiles('**/pom.xml') }}-${{ env.JAVA_VERSION }} @@ -99,7 +99,7 @@ jobs: maven-repo-${{ hashFiles('**/pom.xml') }} maven-repo- - name: Setup java - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ env.JAVA_VERSION }} @@ -108,7 +108,7 @@ jobs: env: GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Store binaries for tests - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ozone-bin path: | @@ -132,11 +132,11 @@ jobs: split: ${{ fromJson(needs.prepare-job.outputs.matrix) }} fail-fast: ${{ fromJson(github.event.inputs.fail-fast) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Download compiled Ozone binaries - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ozone-bin - name: Untar binaries @@ -159,7 +159,7 @@ jobs: run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: acceptance-${{ matrix.split }} diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index f500faa7a75..5c85fda966d 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ 
-30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 44af34cb919..d1dcc654b10 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -223,6 +223,15 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED"; + @Config(key = "incremental.chunk.list", + defaultValue = "false", + type = ConfigType.BOOLEAN, + description = "Client PutBlock request can choose incremental chunk " + + "list rather than full chunk list to optimize performance. " + + "Critical to HBase.", + tags = ConfigTag.CLIENT) + private boolean incrementalChunkList = false; + @PostConstruct private void validate() { Preconditions.checkState(streamBufferSize > 0); @@ -404,4 +413,12 @@ public boolean isDatastreamPipelineMode() { public void setDatastreamPipelineMode(boolean datastreamPipelineMode) { this.datastreamPipelineMode = datastreamPipelineMode; } + + public void setIncrementalChunkList(boolean enable) { + this.incrementalChunkList = enable; + } + + public boolean getIncrementalChunkList() { + return this.incrementalChunkList; + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java index d1b56e7ebf3..36c134b87a4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm; -import java.io.Closeable; import java.io.IOException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -25,7 +24,7 @@ /** * Interface to provide XceiverClient when needed. */ -public interface XceiverClientFactory extends Closeable { +public interface XceiverClientFactory extends AutoCloseable { XceiverClientSpi acquireClient(Pipeline pipeline) throws IOException; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 62156c7e400..f77670a454a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm; -import java.io.Closeable; import java.io.IOException; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; @@ -62,7 +61,7 @@ * without reestablishing connection. But the connection will be closed if * not being used for a period of time. 
*/ -public class XceiverClientManager implements Closeable, XceiverClientFactory { +public class XceiverClientManager implements XceiverClientFactory { private static final Logger LOG = LoggerFactory.getLogger(XceiverClientManager.class); //TODO : change this to SCM configuration class diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java index cf6e09d95ac..2b07dacf1ea 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java @@ -18,11 +18,6 @@ package org.apache.hadoop.hdds.scm.client; -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -72,37 +67,6 @@ private HddsClientUtils() { .add(NotReplicatedException.class) .build(); - /** - * Date format that used in ozone. Here the format is thread safe to use. - */ - private static final ThreadLocal DATE_FORMAT = - ThreadLocal.withInitial(() -> { - DateTimeFormatter format = - DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT); - return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); - }); - - - /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time - */ - public static String formatDateTime(long millis) { - ZonedDateTime dateTime = ZonedDateTime.ofInstant( - Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone()); - return DATE_FORMAT.get().format(dateTime); - } - - /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds - */ - public static long formatDateTime(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return ZonedDateTime.parse(date, DATE_FORMAT.get()) - .toInstant().toEpochMilli(); - } - private static void doNameChecks(String resName) { if (resName == null) { throw new IllegalArgumentException("Bucket or Volume name is null"); @@ -208,17 +172,6 @@ public static void verifyResourceName(String resName, boolean isStrictS3) { } } - /** - * verifies that bucket / volume name is a valid DNS name. - * - * @param resourceNames Array of bucket / volume names to be verified. - */ - public static void verifyResourceName(String... resourceNames) { - for (String resourceName : resourceNames) { - HddsClientUtils.verifyResourceName(resourceName); - } - } - /** * verifies that key name is a valid name. 
* diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 005402efa78..d5423d4ec0b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.storage; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -135,7 +134,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput { private final DataStreamOutput out; private CompletableFuture dataStreamCloseReply; private List> futures = new ArrayList<>(); - private final long syncSize = 0; // TODO: disk sync is disabled for now + private static final long SYNC_SIZE = 0; // TODO: disk sync is disabled for now private long syncPosition = 0; private StreamBuffer currentBuffer; private XceiverClientMetrics metrics; @@ -239,11 +238,6 @@ public List getFailedServers() { return failedServers; } - @VisibleForTesting - public XceiverClientRatis getXceiverClient() { - return xceiverClient; - } - public IOException getIoException() { return ioException.get(); } @@ -331,10 +325,6 @@ private void updateFlushLength() { totalDataFlushedLength = writtenDataLength; } - @VisibleForTesting - public long getTotalDataFlushedLength() { - return totalDataFlushedLength; - } /** * Will be called on the retryPath in case closedContainerException/ * TimeoutException. @@ -640,9 +630,9 @@ public boolean isClosed() { } private boolean needSync(long position) { - if (syncSize > 0) { + if (SYNC_SIZE > 0) { // TODO: or position >= fileLength - if (position - syncPosition >= syncSize) { + if (position - syncPosition >= SYNC_SIZE) { syncPosition = position; return true; } @@ -703,11 +693,6 @@ private void writeChunkToContainer(ByteBuffer buf) containerBlockData.addChunks(chunkInfo); } - @VisibleForTesting - public void setXceiverClient(XceiverClientRatis xceiverClient) { - this.xceiverClient = xceiverClient; - } - /** * Handles InterruptedExecution. 
* diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index a12f9067ce2..d06b1816dc5 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -63,7 +63,7 @@ */ public class BlockInputStream extends BlockExtendedInputStream { - private static final Logger LOG = + public static final Logger LOG = LoggerFactory.getLogger(BlockInputStream.class); private final BlockID blockID; @@ -256,8 +256,8 @@ protected BlockData getBlockDataUsingClient() throws IOException { final Pipeline pipeline = xceiverClient.getPipeline(); if (LOG.isDebugEnabled()) { - LOG.debug("Initializing BlockInputStream for get key to access {}", - blockID.getContainerID()); + LOG.debug("Initializing BlockInputStream for get key to access block {}", + blockID); } DatanodeBlockID.Builder blkIDBuilder = @@ -594,8 +594,4 @@ public synchronized List getChunkStreams() { return chunkStreams; } - @VisibleForTesting - public static Logger getLog() { - return LOG; - } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index ac21411ea5a..a6cd98e48ad 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.storage; import java.io.IOException; import java.io.OutputStream; +import java.nio.BufferOverflowException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -80,6 +82,12 @@ public class BlockOutputStream extends OutputStream { LoggerFactory.getLogger(BlockOutputStream.class); public static final String EXCEPTION_MSG = "Unexpected Storage Container Exception: "; + public static final String INCREMENTAL_CHUNK_LIST = "incremental"; + public static final KeyValue INCREMENTAL_CHUNK_LIST_KV = + KeyValue.newBuilder().setKey(INCREMENTAL_CHUNK_LIST).build(); + public static final String FULL_CHUNK = "full"; + public static final KeyValue FULL_CHUNK_KV = + KeyValue.newBuilder().setKey(FULL_CHUNK).build(); private AtomicReference blockID; private final AtomicReference previousChunkInfo @@ -123,6 +131,10 @@ public class BlockOutputStream extends OutputStream { private int currentBufferRemaining; //current buffer allocated to write private ChunkBuffer currentBuffer; + // last chunk holds the buffer after the last complete chunk, which may be + // different from currentBuffer. We need this to calculate checksum. 
+ private ByteBuffer lastChunkBuffer; + private long lastChunkOffset; private final Token token; private final String tokenString; private int replicationIndex; @@ -164,6 +176,13 @@ public BlockOutputStream( } this.containerBlockData = BlockData.newBuilder().setBlockID( blkIDBuilder.build()).addMetadata(keyValue); + // tell DataNode I will send incremental chunk list + if (config.getIncrementalChunkList()) { + this.containerBlockData.addMetadata(INCREMENTAL_CHUNK_LIST_KV); + this.lastChunkBuffer = + ByteBuffer.allocate(config.getStreamBufferSize()); + this.lastChunkOffset = 0; + } this.xceiverClient = xceiverClientManager.acquireClient(pipeline); this.bufferPool = bufferPool; this.token = token; @@ -235,10 +254,6 @@ public IOException getIoException() { return ioException.get(); } - XceiverClientSpi getXceiverClientSpi() { - return this.xceiverClient; - } - public BlockData.Builder getContainerBlockData() { return this.containerBlockData; } @@ -327,10 +342,6 @@ private void updateFlushLength() { totalDataFlushedLength = writtenDataLength; } - private boolean isBufferPoolFull() { - return bufferPool.computeBufferData() == streamBufferArgs.getStreamBufferMaxSize(); - } - /** * Will be called on the retryPath in case closedContainerException/ * TimeoutException. @@ -468,6 +479,14 @@ ContainerCommandResponseProto> executePutBlock(boolean close, ContainerCommandResponseProto> flushFuture = null; try { BlockData blockData = containerBlockData.build(); + LOG.debug("sending PutBlock {}", blockData); + + if (config.getIncrementalChunkList()) { + // remove any chunks in the containerBlockData list. + // since they are sent. + containerBlockData.clearChunks(); + } + XceiverClientReply asyncReply = putBlockAsync(xceiverClient, blockData, close, tokenString); CompletableFuture future = @@ -746,7 +765,12 @@ CompletableFuture writeChunkToContainer( setIoException(ce); throw ce; }); - containerBlockData.addChunks(chunkInfo); + if (config.getIncrementalChunkList()) { + updateBlockDataForWriteChunk(chunk); + } else { + containerBlockData.addChunks(chunkInfo); + } + clientMetrics.recordWriteChunk(pipeline, chunkInfo.getLen()); return validateFuture; } catch (IOException | ExecutionException e) { @@ -758,6 +782,156 @@ CompletableFuture writeChunkToContainer( return null; } + /** + * Update container block data, which is later sent to DataNodes via PutBlock, + * using the new chunks sent out via WriteChunk. + * + * This method is only used when incremental chunk list is enabled. + * @param chunk the chunk buffer to be sent out by WriteChunk. + * @throws OzoneChecksumException + */ + private void updateBlockDataForWriteChunk(ChunkBuffer chunk) + throws OzoneChecksumException { + // Update lastChunkBuffer using the new chunk data. + // This is used to calculate checksum for the last partial chunk in + // containerBlockData which will used by PutBlock. + + // the last partial chunk in containerBlockData will be replaced. + // So remove it. + removeLastPartialChunk(); + chunk.rewind(); + LOG.debug("Adding chunk pos {} limit {} remaining {}." + + "lastChunkBuffer pos {} limit {} remaining {} lastChunkOffset = {}", + chunk.position(), chunk.limit(), chunk.remaining(), + lastChunkBuffer.position(), lastChunkBuffer.limit(), + lastChunkBuffer.remaining(), lastChunkOffset); + + // Append the chunk to the last chunk buffer. + // if the resulting size exceeds limit (4MB), + // drop the full chunk and keep the rest. 
+ if (lastChunkBuffer.position() + chunk.remaining() <= + lastChunkBuffer.capacity()) { + appendLastChunkBuffer(chunk, 0, chunk.remaining()); + } else { + int remainingBufferSize = + lastChunkBuffer.capacity() - lastChunkBuffer.position(); + appendLastChunkBuffer(chunk, 0, remainingBufferSize); + updateBlockDataWithLastChunkBuffer(); + appendLastChunkBuffer(chunk, remainingBufferSize, + chunk.remaining() - remainingBufferSize); + } + LOG.debug("after append, lastChunkBuffer={} lastChunkOffset={}", + lastChunkBuffer, lastChunkOffset); + + updateBlockDataWithLastChunkBuffer(); + } + + private void updateBlockDataWithLastChunkBuffer() + throws OzoneChecksumException { + // create chunk info for lastChunkBuffer + ChunkInfo lastChunkInfo = createChunkInfo(lastChunkOffset); + LOG.debug("lastChunkInfo = {}", lastChunkInfo); + long lastChunkSize = lastChunkInfo.getLen(); + addToBlockData(lastChunkInfo); + + lastChunkBuffer.clear(); + if (lastChunkSize == config.getStreamBufferSize()) { + lastChunkOffset += config.getStreamBufferSize(); + } else { + lastChunkBuffer.position((int) lastChunkSize); + } + } + + private void appendLastChunkBuffer(ChunkBuffer chunkBuffer, int offset, + int length) { + LOG.debug("copying to last chunk buffer offset={} length={}", + offset, length); + int pos = 0; + int uncopied = length; + for (ByteBuffer bb : chunkBuffer.asByteBufferList()) { + if (pos + bb.remaining() >= offset) { + int copyStart = offset < pos ? 0 : offset - pos; + int copyLen = Math.min(uncopied, bb.remaining()); + try { + LOG.debug("put into last chunk buffer start = {} len = {}", + copyStart, copyLen); + lastChunkBuffer.put(bb.array(), copyStart, copyLen); + } catch (BufferOverflowException e) { + LOG.error("appending from " + copyStart + " for len=" + copyLen + + ". lastChunkBuffer remaining=" + lastChunkBuffer.remaining() + + " pos=" + lastChunkBuffer.position() + + " limit=" + lastChunkBuffer.limit() + + " capacity=" + lastChunkBuffer.capacity()); + throw e; + } + + uncopied -= copyLen; + } + + pos += bb.remaining(); + if (pos >= offset + length) { + return; + } + if (uncopied == 0) { + return; + } + } + } + + private void removeLastPartialChunk() { + // remove the last chunk if it's partial. + if (containerBlockData.getChunksList().isEmpty()) { + return; + } + int lastChunkIndex = containerBlockData.getChunksCount() - 1; + ChunkInfo lastChunkInBlockData = containerBlockData.getChunks( + lastChunkIndex); + if (!isFullChunk(lastChunkInBlockData)) { + containerBlockData.removeChunks(lastChunkIndex); + } + } + + private ChunkInfo createChunkInfo(long lastPartialChunkOffset) + throws OzoneChecksumException { + lastChunkBuffer.flip(); + int revisedChunkSize = lastChunkBuffer.remaining(); + // create the chunk info to be sent in PutBlock. 
+ ChecksumData revisedChecksumData = + checksum.computeChecksum(lastChunkBuffer); + + long chunkID = lastPartialChunkOffset / config.getStreamBufferSize(); + ChunkInfo.Builder revisedChunkInfo = ChunkInfo.newBuilder() + .setChunkName(blockID.get().getLocalID() + "_chunk_" + chunkID) + .setOffset(lastPartialChunkOffset) + .setLen(revisedChunkSize) + .setChecksumData(revisedChecksumData.getProtoBufMessage()); + // if full chunk + if (revisedChunkSize == config.getStreamBufferSize()) { + revisedChunkInfo.addMetadata(FULL_CHUNK_KV); + } + return revisedChunkInfo.build(); + } + + private boolean isFullChunk(ChunkInfo chunkInfo) { + Preconditions.checkState( + chunkInfo.getLen() <= config.getStreamBufferSize()); + return chunkInfo.getLen() == config.getStreamBufferSize(); + } + + private void addToBlockData(ChunkInfo revisedChunkInfo) { + LOG.debug("containerBlockData chunk: {}", containerBlockData); + if (containerBlockData.getChunksCount() > 0) { + ChunkInfo lastChunk = containerBlockData.getChunks( + containerBlockData.getChunksCount() - 1); + LOG.debug("revisedChunkInfo chunk: {}", revisedChunkInfo); + Preconditions.checkState(lastChunk.getOffset() + lastChunk.getLen() == + revisedChunkInfo.getOffset(), + "lastChunk.getOffset() + lastChunk.getLen() " + + "!= revisedChunkInfo.getOffset()"); + } + containerBlockData.addChunks(revisedChunkInfo); + } + @VisibleForTesting public void setXceiverClient(XceiverClientSpi xceiverClient) { this.xceiverClient = xceiverClient; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index ee708bf0de1..b52fc2af917 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -30,8 +30,6 @@ import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.OutputStream; @@ -58,8 +56,6 @@ */ public class RatisBlockOutputStream extends BlockOutputStream implements Syncable { - public static final Logger LOG = LoggerFactory.getLogger( - RatisBlockOutputStream.class); // This object will maintain the commitIndexes and byteBufferList in order // Also, corresponding to the logIndex, the corresponding list of buffers will diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java index cff7a8ecd3c..174fd8c75f6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index ea4f3d743f9..e85bf27d530 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -107,17 +107,6 @@ protected int availableDataLocations(int expectedLocations) { return count; } - protected int availableParityLocations() { - int count = 0; - for (int i = repConfig.getData(); - i < repConfig.getData() + repConfig.getParity(); i++) { - if (dataLocations[i] != null) { - count++; - } - } - return count; - } - public ECBlockInputStream(ECReplicationConfig repConfig, BlockLocationInfo blockInfo, boolean verifyChecksum, XceiverClientFactory xceiverClientFactory, diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java index 21b5a6c6e7f..29b71e9b18d 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java @@ -50,7 +50,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * This test class verifies the parsing of SCM endpoint config settings. The @@ -102,7 +101,6 @@ public void testGetScmClientAddressForHA() { conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); int port = 9880; - int i = 1; for (String nodeId : nodes) { conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY, scmServiceId, nodeId), port); @@ -229,17 +227,13 @@ public void testVerifyResourceName() { invalidNames.add(tooShort); for (String name : invalidNames) { - try { - HddsClientUtils.verifyResourceName(name); - fail("Did not reject invalid string [" + name + "] as a name"); - } catch (IllegalArgumentException e) { - // throwing up on an invalid name. we're good - } + assertThrows(IllegalArgumentException.class, () -> HddsClientUtils.verifyResourceName(name), + "Did not reject invalid string [" + name + "] as a name"); } } @Test - public void testVerifyKeyName() { + void testVerifyKeyName() throws IllegalArgumentException { List invalidNames = new ArrayList<>(); invalidNames.add("#"); invalidNames.add("ab^cd"); @@ -258,12 +252,8 @@ public void testVerifyKeyName() { for (String name : invalidNames) { - try { - HddsClientUtils.verifyKeyName(name); - fail("Did not reject invalid string [" + name + "] as a name"); - } catch (IllegalArgumentException e) { - // throwing up on an invalid name. it's working. - } + assertThrows(IllegalArgumentException.class, () -> HddsClientUtils.verifyKeyName(name), + "Did not reject invalid string [" + name + "] as a name"); } List validNames = new ArrayList<>(); @@ -285,13 +275,7 @@ public void testVerifyKeyName() { validNames.add("dollar$"); for (String name : validNames) { - try { - HddsClientUtils.verifyKeyName(name); - // not throwing up on a valid name. it's working. - } catch (IllegalArgumentException e) { - // throwing up on an valid name. it's not working. 
- fail("Rejected valid string [" + name + "] as a name"); - } + HddsClientUtils.verifyKeyName(name); } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 9d1feafb9a4..4db569b7c07 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hdds.scm.storage; import com.google.common.primitives.Bytes; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; @@ -47,11 +47,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,8 +62,8 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; @@ -187,9 +185,8 @@ public void testSeek() throws Exception { assertThrows(EOFException.class, () -> seekAndVerify(finalPos)); // Seek to random positions between 0 and the block size. 
- Random random = new Random(); for (int i = 0; i < 10; i++) { - pos = random.nextInt(blockSize); + pos = RandomUtils.nextInt(0, blockSize); seekAndVerify(pos); } } @@ -383,12 +380,6 @@ public void testReadNotRetriedOnOtherException(IOException ex) } } - private Pipeline samePipelineWithNewId(Pipeline pipeline) { - List reverseOrder = new ArrayList<>(pipeline.getNodes()); - Collections.reverse(reverseOrder); - return MockPipeline.createPipeline(reverseOrder); - } - @ParameterizedTest @MethodSource("exceptionsTriggersRefresh") public void testRefreshOnReadFailureAfterUnbuffer(IOException ex) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 3d2ff00d64f..9b061f5392d 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -20,10 +20,10 @@ import java.io.IOException; import java.util.Map; -import java.util.Random; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; @@ -42,12 +42,13 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -56,37 +57,28 @@ *

* Compares bytes written to the stream and received in the ChunkWriteRequests. */ -public class TestBlockOutputStreamCorrectness { - - private static final long SEED = 18480315L; +class TestBlockOutputStreamCorrectness { - private int writeUnitSize = 1; + private static final int DATA_SIZE = 256 * (int) OzoneConsts.MB; + private static final byte[] DATA = RandomUtils.nextBytes(DATA_SIZE); - @Test - public void test() throws IOException { + @ParameterizedTest + @ValueSource(ints = { 1, 1024, 1024 * 1024 }) + void test(final int writeSize) throws IOException { + assertEquals(0, DATA_SIZE % writeSize); final BufferPool bufferPool = new BufferPool(4 * 1024 * 1024, 32 / 4); for (int block = 0; block < 10; block++) { - BlockOutputStream outputStream = - createBlockOutputStream(bufferPool); - - Random random = new Random(SEED); - - int max = 256 * 1024 * 1024 / writeUnitSize; - - byte[] writeBuffer = new byte[writeUnitSize]; - for (int t = 0; t < max; t++) { - if (writeUnitSize > 1) { - for (int i = 0; i < writeBuffer.length; i++) { - writeBuffer[i] = (byte) random.nextInt(); + try (BlockOutputStream outputStream = createBlockOutputStream(bufferPool)) { + for (int i = 0; i < DATA_SIZE / writeSize; i++) { + if (writeSize > 1) { + outputStream.write(DATA, i * writeSize, writeSize); + } else { + outputStream.write(DATA[i]); } - outputStream.write(writeBuffer, 0, writeBuffer.length); - } else { - outputStream.write((byte) random.nextInt()); } } - outputStream.close(); } } @@ -126,9 +118,8 @@ private static class MockXceiverClientSpi extends XceiverClientSpi { private final Pipeline pipeline; - private final Random expectedRandomStream = new Random(SEED); - private final AtomicInteger counter = new AtomicInteger(); + private int i; MockXceiverClientSpi(Pipeline pipeline) { super(); @@ -175,8 +166,8 @@ public XceiverClientReply sendCommandAsync( ByteString data = request.getWriteChunk().getData(); final byte[] writePayload = data.toByteArray(); for (byte b : writePayload) { - byte expectedByte = (byte) expectedRandomStream.nextInt(); - assertEquals(expectedByte, b); + assertEquals(DATA[i], b); + ++i; } break; default: diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java index aabbbb3eedc..b56c503df9b 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java @@ -26,8 +26,8 @@ import java.util.LinkedList; import java.util.concurrent.ThreadLocalRandom; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -79,8 +79,8 @@ private static Deque assertAllocate(BufferPool pool) { assertEmpty(allocated, size); fill(allocated); // make buffer contents unique, for equals check - assertFalse(buffers.contains(allocated), - () -> "buffer " + n + ": " + allocated + " already in: " + buffers); + assertThat(buffers).withFailMessage("buffer " + n + ": " + allocated + " already in: " + buffers) + .doesNotContain(allocated); buffers.addLast(allocated); } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java 
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java index a5de86a84f6..acd8a613ab9 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java @@ -49,7 +49,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 8db662cee07..41bf46a8ea2 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -281,7 +281,6 @@ public synchronized BlockExtendedInputStream create( public static class TestBlockInputStream extends BlockExtendedInputStream { private ByteBuffer data; - private boolean closed = false; private BlockID blockID; private long length; private boolean shouldError = false; @@ -304,10 +303,6 @@ public static class TestBlockInputStream extends BlockExtendedInputStream { data.position(0); } - public boolean isClosed() { - return closed; - } - public void setShouldErrorOnSeek(boolean val) { this.shouldErrorOnSeek = val; } @@ -377,9 +372,7 @@ protected int readWithStrategy(ByteReaderStrategy strategy) throws } @Override - public void close() { - closed = true; - } + public void close() { } @Override public void unbuffer() { diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index e8ada43b08a..97bf71c204a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ -169,7 +169,7 @@ public void testCorrectStreamCreatedDependingOnDataLocations() BlockLocationInfo blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); - try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { + try (ECBlockInputStreamProxy ignored = createBISProxy(repConfig, blockInfo)) { // Not all locations present, so we expect on;y the "missing=true" stream // to be present. assertThat(streamFactory.getStreams()).containsKey(false); @@ -181,7 +181,7 @@ public void testCorrectStreamCreatedDependingOnDataLocations() dnMap = ECStreamTestUtil.createIndexMap(2, 3, 4, 5); blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); - try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) { + try (ECBlockInputStreamProxy ignored = createBISProxy(repConfig, blockInfo)) { // Not all locations present, so we expect on;y the "missing=true" stream // to be present. 
assertThat(streamFactory.getStreams()).doesNotContainKey(false); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index c708fc28ddb..f7a4bb0643e 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.collect.ImmutableSet; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -41,7 +42,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.SplittableRandom; import java.util.concurrent.ExecutorService; @@ -52,6 +52,7 @@ import static java.util.Collections.singleton; import static java.util.stream.Collectors.toSet; import static org.apache.hadoop.ozone.client.io.ECStreamTestUtil.generateParity; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -645,7 +646,7 @@ public void testSeekToPartialOffsetFails() { } private Integer getRandomStreamIndex(Set set) { - return set.stream().skip(new Random().nextInt(set.size())) + return set.stream().skip(RandomUtils.nextInt(0, set.size())) .findFirst().orElse(null); } @@ -800,7 +801,7 @@ public void testFailedLocationsAreNotRead() throws IOException { // created in the stream factory, indicating we did not read them. 
List streams = streamFactory.getBlockStreams(); for (TestBlockInputStream stream : streams) { - assertTrue(stream.getEcReplicaIndex() > 2); + assertThat(stream.getEcReplicaIndex()).isGreaterThan(2); } } } diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index b3cf683ec80..20dce15d4d1 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false @@ -86,6 +85,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> javax.annotation javax.annotation-api + + jakarta.annotation + jakarta.annotation-api + io.dropwizard.metrics @@ -102,7 +105,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on @@ -138,7 +141,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcpkix-jdk15on + bcpkix-jdk18on ${bouncycastle.version} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 7b007fdca1f..787f023df2e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -392,4 +392,9 @@ private HddsConfigKeys() { public static final String OZONE_AUDIT_LOG_DEBUG_CMD_LIST_DNAUDIT = "ozone.audit.log.debug.cmd.list.dnaudit"; + + public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY = + "hdds.datanode.slow.op.warning.threshold"; + public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT = + "500ms"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java index b244b8cf75d..7a94d77c770 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds; -import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; /** @@ -41,13 +40,4 @@ public static long getLongId() { return LONG_COUNTER.incrementAndGet(); } - /** - * Returns a uuid. - * - * @return UUID. - */ - public static UUID getUUId() { - return UUID.randomUUID(); - } - } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 06885ed3dc6..ee1c9669a1b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -20,8 +20,8 @@ import com.google.protobuf.ServiceException; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import javax.management.ObjectName; import java.io.File; import java.io.IOException; @@ -105,17 +105,6 @@ public final class HddsUtils { private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class); - /** - * The service ID of the solitary Ozone SCM service. - */ - public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService"; - public static final String OZONE_SCM_SERVICE_INSTANCE_ID = - "OzoneScmServiceInstance"; - - private static final String MULTIPLE_SCM_NOT_YET_SUPPORTED = - ScmConfigKeys.OZONE_SCM_NAMES + " must contain a single hostname." 
- + " Multiple SCM hosts are currently unsupported"; - public static final ByteString REDACTED = ByteString.copyFromUtf8(""); @@ -654,30 +643,6 @@ public static File createDir(String dirPath) { return dirFile; } - /** - * Leverages the Configuration.getPassword method to attempt to get - * passwords from the CredentialProvider API before falling back to - * clear text in config - if falling back is allowed. - * @param conf Configuration instance - * @param alias name of the credential to retrieve - * @return String credential value or null - */ - static String getPassword(ConfigurationSource conf, String alias) { - String password = null; - try { - char[] passchars = conf.getPassword(alias); - if (passchars != null) { - password = new String(passchars); - } - } catch (IOException ioe) { - LOG.warn("Setting password to null since IOException is caught" - + " when getting password", ioe); - - password = null; - } - return password; - } - /** * Utility string formatter method to display SCM roles. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java index 8349b12e6bf..fc4e796ffff 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java @@ -111,15 +111,6 @@ public String getRatisHostPortStr() { return hostPort.toString(); } - public String getRatisAddressPortStr() { - StringBuilder hostPort = new StringBuilder(); - hostPort.append(getInetAddress().getHostAddress()) - .append(":") - .append(ratisPort); - return hostPort.toString(); - } - - public int getRatisPort() { return ratisPort; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java index 8a4d75a31fb..b3a762e2eda 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hdds.client; import com.google.common.base.Strings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.apache.hadoop.ozone.OzoneConsts.KB; @@ -33,8 +31,6 @@ * a storage volume. */ public final class OzoneQuota { - public static final Logger LOG = - LoggerFactory.getLogger(OzoneQuota.class); public static final String OZONE_QUOTA_B = "B"; public static final String OZONE_QUOTA_KB = "KB"; @@ -144,16 +140,6 @@ private OzoneQuota(long quotaInNamespace, RawQuotaInBytes rawQuotaInBytes) { this.quotaInBytes = rawQuotaInBytes.sizeInBytes(); } - /** - * Formats a quota as a string. - * - * @param quota the quota to format - * @return string representation of quota - */ - public static String formatQuota(OzoneQuota quota) { - return String.valueOf(quota.getRawSize()) + quota.getUnit(); - } - /** * Parses a user provided string space quota and returns the * Quota Object. 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index bb5ff0067f4..69cce8db6d6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -101,16 +101,6 @@ public static T newInstanceOf(Class configurationClass) { return conf.getObject(configurationClass); } - /** - * @return a new {@code OzoneConfiguration} instance set from the given - * {@code configObject} - */ - public static OzoneConfiguration fromObject(T configObject) { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setFromObject(configObject); - return conf; - } - public OzoneConfiguration() { OzoneConfiguration.activate(); loadDefaults(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java index ddecf1f0607..2d29dc8565c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java @@ -52,7 +52,7 @@ public class FakeClusterTopology { public FakeClusterTopology() { try { for (int i = 0; i < 9; i++) { - datanodes.add(createDatanode(i)); + datanodes.add(createDatanode()); if ((i + 1) % 3 == 0) { pipelines.add(Pipeline.newBuilder() .setId(PipelineID.randomId().getProtobuf()) @@ -69,7 +69,7 @@ public FakeClusterTopology() { } } - private DatanodeDetailsProto createDatanode(int index) { + private DatanodeDetailsProto createDatanode() { return DatanodeDetailsProto.newBuilder() .setUuid(UUID.randomUUID().toString()) .setHostName("localhost") diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java index 7808dccaf5d..6f776072d9c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java @@ -24,7 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.time.Duration; import java.util.OptionalLong; import java.util.concurrent.Executors; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java deleted file mode 100644 index 915fe3557e2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Functional interfaces for ozone, similar to java.util.function. - */ -package org.apache.hadoop.hdds.function; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 739f6ebd656..5b6fb6fe9b8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; -import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -563,14 +562,6 @@ public int hashCode() { return uuid.hashCode(); } - // Skip The OpStates which may change in Runtime. - public int getSignature() { - return Objects - .hash(uuid, uuidString, ipAddress, hostName, ports, - certSerialId, version, setupTime, revision, buildDate, - initialVersion, currentVersion); - } - /** * Returns DatanodeDetails.Builder instance. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java index 5a1e2864b5d..89e6a05b6bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java @@ -28,7 +28,6 @@ * Exception thrown when a server is not a leader for Ratis group. */ public class ServerNotLeaderException extends IOException { - private final String currentPeerId; private final String leader; private static final Pattern CURRENT_PEER_ID_PATTERN = Pattern.compile("Server:(.*) is not the leader[.]+.*", Pattern.DOTALL); @@ -39,7 +38,6 @@ public class ServerNotLeaderException extends IOException { public ServerNotLeaderException(RaftPeerId currentPeerId) { super("Server:" + currentPeerId + " is not the leader. Could not " + "determine the leader node."); - this.currentPeerId = currentPeerId.toString(); this.leader = null; } @@ -47,7 +45,6 @@ public ServerNotLeaderException(RaftPeerId currentPeerId, String suggestedLeader) { super("Server:" + currentPeerId + " is not the leader. 
Suggested leader is" + " Server:" + suggestedLeader + "."); - this.currentPeerId = currentPeerId.toString(); this.leader = suggestedLeader; } @@ -57,7 +54,6 @@ public ServerNotLeaderException(String message) { Matcher currentLeaderMatcher = CURRENT_PEER_ID_PATTERN.matcher(message); if (currentLeaderMatcher.matches()) { - this.currentPeerId = currentLeaderMatcher.group(1); Matcher suggestedLeaderMatcher = SUGGESTED_LEADER_PATTERN.matcher(message); @@ -77,7 +73,6 @@ public ServerNotLeaderException(String message) { this.leader = null; } } else { - this.currentPeerId = null; this.leader = null; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java index e47411f3f33..014f62343c6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java @@ -35,59 +35,10 @@ public RemoveSCMRequest(String clusterId, String scmId, String addr) { this.ratisAddr = addr; } - public static RemoveSCMRequest getFromProtobuf( - HddsProtos.RemoveScmRequestProto proto) { - return new Builder().setClusterId(proto.getClusterId()) - .setScmId(proto.getScmId()).setRatisAddr(proto.getRatisAddr()).build(); - } - public HddsProtos.RemoveScmRequestProto getProtobuf() { return HddsProtos.RemoveScmRequestProto.newBuilder().setClusterId(clusterId) .setScmId(scmId).setRatisAddr(ratisAddr).build(); } - /** - * Builder for RemoveSCMRequest. - */ - public static class Builder { - private String clusterId; - private String scmId; - private String ratisAddr; - - - /** - * sets the cluster id. - * @param cid clusterId to be set - * @return Builder for RemoveSCMRequest - */ - public RemoveSCMRequest.Builder setClusterId(String cid) { - this.clusterId = cid; - return this; - } - - /** - * sets the scmId. - * @param id scmId - * @return Builder for RemoveSCMRequest - */ - public RemoveSCMRequest.Builder setScmId(String id) { - this.scmId = id; - return this; - } - - /** - * Set ratis address in Scm HA. - * @param addr address in the format of [ip|hostname]:port - * @return Builder for RemoveSCMRequest - */ - public RemoveSCMRequest.Builder setRatisAddr(String addr) { - this.ratisAddr = addr; - return this; - } - - public RemoveSCMRequest build() { - return new RemoveSCMRequest(clusterId, scmId, ratisAddr); - } - } /** * Gets the clusterId from the Version file. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 46816a63d34..2fc04e00f23 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -73,8 +73,17 @@ public class ScmConfig extends ReconfigurableConfig { + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " + "The class decides which pipeline will be used to find or " + "allocate Ratis containers. If not set, " - + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. " - + "RandomPipelineChoosePolicy will be used as default value." + + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy will be used as default value. " + + "The following values can be used, " + + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy : random choose one pipeline. 
" + + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. " + + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "CapacityPipelineChoosePolicy : choose the pipeline with lower " + + "utilization from the two pipelines. Note that random choose " + + "method will be executed twice in this policy." ) private String pipelineChoosePolicyName; @@ -85,11 +94,20 @@ public class ScmConfig extends ReconfigurableConfig { tags = { ConfigTag.SCM, ConfigTag.PIPELINE }, description = "The full name of class which implements " - + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " - + "The class decides which pipeline will be used when " - + "selecting an EC Pipeline. If not set, " - + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. " - + "RandomPipelineChoosePolicy will be used as default value." + + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " + + "The class decides which pipeline will be used when " + + "selecting an EC Pipeline. If not set, " + + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy will be used as default value. " + + "The following values can be used, " + + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy : random choose one pipeline. " + + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. " + + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "CapacityPipelineChoosePolicy : choose the pipeline with lower " + + "utilization from the two pipelines. Note that random choose " + + "method will be executed twice in this policy." ) private String ecPipelineChoosePolicyName; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index b03cead27e7..402398e36c3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -20,10 +20,12 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -39,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; /** * The interface to call into underlying container layer. @@ -178,6 +181,14 @@ ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException; + /** + * Gets the list of underReplicated and unClosed containers on a decommissioning node. 
+ * + * @param dn - Datanode detail + * @return Lists of underReplicated and unClosed containers + */ + Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException; + /** * Returns a set of Nodes that meet a query criteria. Passing null for opState * or nodeState acts like a wild card, returning all nodes in that state. @@ -194,6 +205,14 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.NodeState nodeState, HddsProtos.QueryScope queryScope, String poolName) throws IOException; + /** + * Returns a node with the given UUID. + * @param uuid - datanode uuid string + * @return The node that matches the requested UUID. + * @throws IOException + */ + HddsProtos.Node queryNode(UUID uuid) throws IOException; + /** * Allows a list of hosts to be decommissioned. The hosts are identified * by their hostname and optionally port in the format foo.com:port. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index 1dbbc738432..df8e9d45e13 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -74,7 +74,10 @@ public enum HealthState { "OpenUnhealthyContainers"), QUASI_CLOSED_STUCK( "Containers QuasiClosed with insufficient datanode origins", - "StuckQuasiClosedContainers"); + "StuckQuasiClosedContainers"), + OPEN_WITHOUT_PIPELINE( + "Containers in OPEN state without any healthy Pipeline", + "OpenContainersWithoutPipeline"); private String description; private String metricName; @@ -129,11 +132,6 @@ public void incrementAndSample(HealthState stat, ContainerID container) { incrementAndSample(stat.toString(), container); } - public void incrementAndSample(HddsProtos.LifeCycleState stat, - ContainerID container) { - incrementAndSample(stat.toString(), container); - } - public void setComplete() { reportTimeStamp = System.currentTimeMillis(); } @@ -238,10 +236,6 @@ protected void setSample(String stat, List sample) { containerSample.put(stat, sample); } - public List getSample(HddsProtos.LifeCycleState stat) { - return getSample(stat.toString()); - } - public List getSample(HealthState stat) { return getSample(stat.toString()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index 9ba766bc941..80e09af172b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -115,10 +115,6 @@ public static Collection getSCMNodeIds(ConfigurationSource conf, return conf.getTrimmedStringCollection(key); } - public static String getLocalSCMNodeId(String scmServiceId) { - return addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId); - } - /** * Add non empty and non null suffix to a key.
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java index 633ffba9e96..8ee6decc9c4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java @@ -36,7 +36,6 @@ private NetConstants() { public static final int NODE_COST_DEFAULT = 0; public static final int ANCESTOR_GENERATION_DEFAULT = 0; public static final int ROOT_LEVEL = 1; - public static final String NODE_COST_PREFIX = "$"; public static final String DEFAULT_RACK = "/default-rack"; public static final String DEFAULT_NODEGROUP = "/default-nodegroup"; public static final String DEFAULT_DATACENTER = "/default-datacenter"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java index 7463c52e953..18c530140e8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; -import java.util.regex.Pattern; /** * Utility class to facilitate network topology functions. @@ -35,9 +34,6 @@ public final class NetUtils { private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class); - private static final Pattern TRAILING_PATH_SEPARATOR = - Pattern.compile(NetConstants.PATH_SEPARATOR_STR + "+$"); - private NetUtils() { // Prevent instantiation } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 5e0697eaafd..2dc86c1b685 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -657,7 +657,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, ancestorGen); if (availableNodes <= 0) { - LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " + + LOG.info("No available node in (scope=\"{}\" excludedScope=\"{}\" " + "excludedNodes=\"{}\" ancestorGen=\"{}\").", scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes, ancestorGen); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index b587cc924b0..e8bddb42cfb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -19,6 +19,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; @@ -44,6 +45,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.UUID; /** * 
ContainerLocationProtocol is used by an HDFS node to find the set of nodes @@ -53,7 +55,8 @@ .HDDS_SCM_KERBEROS_PRINCIPAL_KEY) public interface StorageContainerLocationProtocol extends Closeable { - @SuppressWarnings("checkstyle:ConstantName") + // Accessed and checked via reflection in Hadoop RPC - changing it is incompatible + @SuppressWarnings({"checkstyle:ConstantName", "unused"}) /** * Version 1: Initial version. */ @@ -219,6 +222,14 @@ List listContainer(long startContainerID, */ void deleteContainer(long containerID) throws IOException; + /** + * Gets the list of underReplicated and unClosed containers on a decommissioning node. + * + * @param dn - Datanode detail + * @return Lists of underReplicated and unClosed containers + */ + Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException; + /** * Queries a list of Node Statuses. Passing a null for either opState or * state acts like a wildcard returning all nodes in that state. @@ -232,6 +243,8 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state, HddsProtos.QueryScope queryScope, String poolName, int clientVersion) throws IOException; + HddsProtos.Node queryNode(UUID uuid) throws IOException; + List decommissionNodes(List nodes) throws IOException; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index e0458f03472..9acb0e5c33a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -144,17 +144,6 @@ public static ContainerCommandResponseProto putBlockResponseSuccess( .build(); } - /** - * Returns successful blockResponse. - * @param msg - Request. - * @return Response. 
- */ - public static ContainerCommandResponseProto getBlockResponseSuccess( - ContainerCommandRequestProto msg) { - - return getSuccessResponse(msg); - } - public static ContainerCommandResponseProto getBlockDataResponse( ContainerCommandRequestProto msg, BlockData data) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java index 25bec2145f0..9a9002195c8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateNotification; import org.slf4j.Logger; @@ -58,8 +57,7 @@ public class PemFileBasedKeyStoresFactory implements KeyStoresFactory, private TrustManager[] trustManagers; private final CertificateClient caClient; - public PemFileBasedKeyStoresFactory(SecurityConfig securityConfig, - CertificateClient client) { + public PemFileBasedKeyStoresFactory(CertificateClient client) { this.caClient = client; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java index 16cbaf9be7d..91a98b799c2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token.TrivialRenewer; import org.apache.hadoop.util.ProtobufUtils; import java.io.DataInput; @@ -185,17 +184,5 @@ public byte[] getBytes() { } return builder.build().toByteArray(); } - - /** - * Default TrivialRenewer. - */ - @InterfaceAudience.Private - public static class Renewer extends TrivialRenewer { - - @Override - protected Text getKind() { - return KIND_NAME; - } - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java index 989c8df0fb3..6f13ac34a29 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java @@ -55,25 +55,4 @@ public Token selectToken(Text service, } return null; } - - /** - * Static method to avoid instantiation. 
- * */ - @SuppressWarnings("unchecked") - public static Token selectBlockToken(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().equals(service)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service:{}", service); - } - return (Token) token; - } - } - return null; - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index 47982449853..e196d0df9d7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -21,10 +21,8 @@ import org.apache.hadoop.hdds.security.exception.OzoneSecurityException; import org.apache.hadoop.hdds.security.ssl.KeyStoresFactory; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; import java.io.Closeable; import java.io.IOException; @@ -138,17 +136,6 @@ X509Certificate getCertificate(String certSerialId) */ List getCAList(); - /** - * Return the pem encoded CA certificate list. - * - * If list is null, fetch the list from SCM and returns the list. - * If list is not null, return the pem encoded CA certificate list. - * - * @return list of pem encoded CA certificates. - * @throws IOException - */ - List listCA() throws IOException; - /** * Update and returns the pem encoded CA certificate list. * @return list of pem encoded CA certificates. @@ -156,16 +143,6 @@ X509Certificate getCertificate(String certSerialId) */ List updateCAList() throws IOException; - /** - * Creates digital signature over the data stream using the components private - * key. - * - * @param data data to be signed - * @return byte array - containing the signature - * @throws CertificateException - on Error - */ - byte[] signData(byte[] data) throws CertificateException; - /** * Verifies a digital Signature, given the signature and the certificate of * the signer. @@ -186,26 +163,6 @@ boolean verifySignature(byte[] data, byte[] signature, CertificateSignRequest.Builder getCSRBuilder() throws CertificateException; - /** - * Send request to SCM to sign the certificate and save certificates returned - * by SCM to PEM files on disk. - * - * @return the serial ID of the new certificate - */ - String signAndStoreCertificate(PKCS10CertificationRequest request) - throws CertificateException; - - /** - * Stores the Certificate for this client. Don't use this api to add - * trusted certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param caType - Is CA certificate. - * @throws CertificateException - on Error. 
- */ - void storeCertificate(String pemEncodedCert, CAType caType) - throws CertificateException; - default void assertValidKeysAndCertificate() throws OzoneSecurityException { try { Objects.requireNonNull(getPublicKey()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java index 802c3ff07ee..87834cdb456 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java @@ -262,15 +262,6 @@ public Builder addIpAddress(String ip) { return this; } - public Builder addServiceName( - String serviceName) { - Preconditions.checkNotNull( - serviceName, "Service Name cannot be null"); - - this.addAltName(GeneralName.otherName, serviceName); - return this; - } - private Builder addAltName(int tag, String name) { if (altNames == null) { altNames = new ArrayList<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java index df7cdde0473..208cff7c816 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java @@ -88,7 +88,6 @@ public enum ErrorCode { BOOTSTRAP_ERROR, CSR_ERROR, CRYPTO_SIGNATURE_VERIFICATION_ERROR, - CERTIFICATE_NOT_FOUND_ERROR, RENEW_ERROR, ROLLBACK_ERROR } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java index cad0f7ffc28..d14129972c6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java @@ -31,7 +31,6 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import com.fasterxml.jackson.databind.type.CollectionType; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; /** @@ -74,22 +73,6 @@ public static ObjectNode createObjectNode(Object next) { return MAPPER.valueToTree(next); } - /** - * Deserialize a list of elements from a given string, - * each element in the list is in the given type. - * - * @param str json string. - * @param elementType element type. - * @return List of elements of type elementType - * @throws IOException - */ - public static List toJsonList(String str, Class elementType) - throws IOException { - CollectionType type = MAPPER.getTypeFactory() - .constructCollectionType(List.class, elementType); - return MAPPER.readValue(str, type); - } - /** * Utility to sequentially write a large collection of items to a file. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 6c0272e256e..31aaca568e4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.http.client.methods.HttpRequestBase; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -127,17 +126,6 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, return updatedAddr; } - - /** - * Releases a http connection if the request is not null. - * @param request - */ - public static void releaseConnection(HttpRequestBase request) { - if (request != null) { - request.releaseConnection(); - } - } - /** * Get the location where SCM should store its metadata directories. * Fall back to OZONE_METADATA_DIRS if not defined. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java index bd35d56c77b..802c1531230 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java @@ -45,7 +45,7 @@ public void onMessage(ReqT message) { .importAndCreateSpan( call.getMethodDescriptor().getFullMethodName(), headers.get(GrpcClientInterceptor.TRACING_HEADER)); - try (Scope scope = GlobalTracer.get().activateSpan(span)) { + try (Scope ignored = GlobalTracer.get().activateSpan(span)) { super.onMessage(message); } finally { span.finish(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java index d30e50f8034..b968d407232 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java @@ -149,16 +149,6 @@ public static R executeInNewSpan(String spanName, return executeInSpan(span, supplier); } - /** - * Execute a function inside an activated new span. - */ - public static void executeInNewSpan(String spanName, - CheckedRunnable runnable) throws E { - Span span = GlobalTracer.get() - .buildSpan(spanName).start(); - executeInSpan(span, runnable); - } - /** * Execute {@code supplier} in the given {@code span}. * @return the value returned by {@code supplier} @@ -190,15 +180,6 @@ private static void executeInSpan(Span span, } } - /** - * Execute a new function as a child span of the parent. - */ - public static R executeAsChildSpan(String spanName, - String parentName, CheckedSupplier supplier) throws E { - Span span = TracingUtil.importAndCreateSpan(spanName, parentName); - return executeInSpan(span, supplier); - } - /** * Execute a new function as a child span of the parent. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java index 109f4b3df05..4620a483385 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java @@ -20,7 +20,6 @@ import org.slf4j.Logger; -import java.io.Closeable; import java.util.Arrays; import java.util.Collection; @@ -40,11 +39,11 @@ private IOUtils() { * null. * @param closeables the objects to close */ - public static void cleanupWithLogger(Logger logger, Closeable... closeables) { + public static void cleanupWithLogger(Logger logger, AutoCloseable... closeables) { if (closeables == null) { return; } - for (Closeable c : closeables) { + for (AutoCloseable c : closeables) { if (c != null) { try { c.close(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java index 53058c96ff3..2ec396c0ffa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.utils.db; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; /** * Codec to serialize/deserialize {@link Boolean}. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java index 1675905a674..46779648e67 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.utils.db; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; /** @@ -42,14 +42,6 @@ default boolean supportCodecBuffer() { return false; } - /** - * @return an upper bound, which should be obtained without serialization, - * of the serialized size of the given object. - */ - default int getSerializedSizeUpperBound(T object) { - throw new UnsupportedOperationException(); - } - /** * Serialize the given object to bytes. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 97311b921c8..64e494a5af1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -420,17 +420,6 @@ public CodecBuffer put(byte val) { return this; } - /** - * Similar to {@link ByteBuffer#put(byte[])}. - * - * @return this object. - */ - public CodecBuffer put(byte[] array) { - assertRefCnt(1); - buf.writeBytes(array); - return this; - } - /** * Similar to {@link ByteBuffer#put(ByteBuffer)}. 
* diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index 0ab907dfb3c..dff0b015ed5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -19,7 +19,7 @@ import org.apache.ratis.util.function.CheckedFunction; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java index cd082d30e16..50488053159 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.utils.db; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.nio.ByteBuffer; /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java index 912485052f3..9e776cc18f7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hdds.utils.db; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.nio.ByteBuffer; /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java index e6b8338d5d0..96d12d1ebe5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java @@ -22,7 +22,7 @@ import com.google.protobuf.Parser; import org.apache.ratis.util.function.CheckedFunction; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java index d353a489d9f..30245e033e0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java @@ -22,7 +22,7 @@ import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; import org.apache.ratis.thirdparty.com.google.protobuf.Parser; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.ConcurrentHashMap; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java index 8f2e9d322ad..f6482e5712c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java @@ -20,7 +20,7 @@ import java.nio.ByteBuffer; -import javax.annotation.Nonnull; +import 
jakarta.annotation.Nonnull; /** * Codec to serialize/deserialize {@link Short}. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java index 58d2edec762..1df55237937 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java @@ -23,7 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.CharBuffer; @@ -88,8 +88,7 @@ public boolean isFixedLength() { * When {@link #isFixedLength()} is true, * the upper bound equals to the serialized size. */ - @Override - public int getSerializedSizeUpperBound(String s) { + private int getSerializedSizeUpperBound(String s) { return maxBytesPerChar * s.length(); } @@ -177,8 +176,7 @@ public CodecBuffer toCodecBuffer(@Nonnull String object, } @Override - public String fromCodecBuffer(@Nonnull CodecBuffer buffer) - throws IOException { + public String fromCodecBuffer(@Nonnull CodecBuffer buffer) { return decode(buffer.asReadOnlyByteBuffer()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java index 33ef2895fae..dfccaa0ab75 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hdds.utils.db; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.nio.ByteBuffer; import java.util.UUID; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java index d2301bed2cb..94e762dea96 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.utils.io; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java index 3f8fcd9f56c..1ffae958952 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.utils.io; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.FilterOutputStream; import java.io.IOException; import java.io.OutputStream; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index c7fb230119e..a0d4b59db16 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -145,9 
+145,6 @@ public final class OzoneConfigKeys { "dfs.container.ratis.ipc.random.port"; public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; - public static final String OZONE_TRACE_ENABLED_KEY = - "ozone.trace.enabled"; - public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = "ozone.metastore.rocksdb.statistics"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 37741f8cff9..9069c425e7d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -148,17 +148,6 @@ public final class OzoneConsts { public static final String MULTIPART_FORM_DATA_BOUNDARY = "---XXX"; - /** - * Supports Bucket Versioning. - */ - public enum Versioning { - NOT_DEFINED, ENABLED, DISABLED; - - public static Versioning getVersioning(boolean versioning) { - return versioning ? ENABLED : DISABLED; - } - } - // Block ID prefixes used in datanode containers. public static final String DELETING_KEY_PREFIX = "#deleting#"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 00f2e55f97f..985c238fd77 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -44,7 +44,6 @@ public enum OzoneManagerVersion implements ComponentVersion { + " newer and an unknown server version has arrived to the client."); public static final OzoneManagerVersion CURRENT = latest(); - public static final int CURRENT_VERSION = CURRENT.version; private static final Map BY_PROTO_VALUE = Arrays.stream(values()) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java index 059ed650f37..f8b3febfeca 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java @@ -33,8 +33,6 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Class to compute and verify checksums for chunks. @@ -42,8 +40,6 @@ * This class is not thread safe. */ public class Checksum { - public static final Logger LOG = LoggerFactory.getLogger(Checksum.class); - private static Function newMessageDigestFunction( String algorithm) { final MessageDigest md; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java deleted file mode 100644 index 62a323d2538..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.util.Time; - -import java.time.Clock; -import java.time.Instant; -import java.time.ZoneId; - -/** - * This is a class which implements the Clock interface. It is a copy of the - * Java Clock.SystemClock only it uses MonotonicNow (nanotime) rather than - * System.currentTimeMills. - */ - -public final class MonotonicClock extends Clock { - - private final ZoneId zoneId; - - public MonotonicClock(ZoneId zone) { - this.zoneId = zone; - } - - @Override - public ZoneId getZone() { - return zoneId; - } - - @Override - public Clock withZone(ZoneId zone) { - if (zone.equals(this.zoneId)) { // intentional NPE - return this; - } - return new MonotonicClock(zone); - } - - @Override - public long millis() { - return Time.monotonicNow(); - } - - @Override - public Instant instant() { - return Instant.ofEpochMilli(millis()); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof MonotonicClock) { - return zoneId.equals(((MonotonicClock) obj).zoneId); - } - return false; - } - - @Override - public int hashCode() { - return zoneId.hashCode() + 1; - } - - @Override - public String toString() { - return "MonotonicClock[" + zoneId + "]"; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java index 8ba7b6da1eb..7bc7f618a3f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.common; import java.io.IOException; -import java.security.NoSuchAlgorithmException; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -46,17 +45,6 @@ public OzoneChecksumException( unrecognizedChecksumType)); } - /** - * OzoneChecksumException to wrap around NoSuchAlgorithmException. - * @param algorithm name of algorithm - * @param ex original exception thrown - */ - public OzoneChecksumException( - String algorithm, NoSuchAlgorithmException ex) { - super(String.format("NoSuchAlgorithmException thrown while computing " + - "SHA-256 checksum using algorithm %s", algorithm), ex); - } - /** * OzoneChecksumException to throw with custom message. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java index 8bfb7490c4d..c6ad754f19b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java @@ -136,10 +136,4 @@ public static int getNumberOfBins(long numElements, int maxElementsPerBin) { } return Math.toIntExact(n); } - - public static void clearBuffers(ByteBuffer[] byteBuffers) { - for (ByteBuffer buffer : byteBuffers) { - buffer.clear(); - } - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index 8f89be3c118..4bd170df8e8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -146,22 +146,6 @@ public synchronized Map getMetadata() { return Collections.unmodifiableMap(this.metadata); } - /** - * Returns value of a key. - */ - public synchronized String getValue(String key) { - return metadata.get(key); - } - - /** - * Deletes a metadata entry from the map. - * - * @param key - Key - */ - public synchronized void deleteKey(String key) { - metadata.remove(key); - } - @SuppressWarnings("unchecked") private List castChunkList() { return (List)chunkList; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index 6f31ee40c4b..fdf40af9e09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -48,10 +48,6 @@ public ChunkInfoList(List chunks) { this.chunks = Collections.unmodifiableList(chunks); } - public List asList() { - return chunks; - } - /** * @return A new {@link ChunkInfoList} created from protobuf data. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java index f2bad543a9e..7dfcf3eb8c8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java @@ -282,7 +282,7 @@ public void run() { try { // ignore return value, just used for wait - boolean b = semaphore.tryAcquire(sleepTime, TimeUnit.MILLISECONDS); + boolean ignored = semaphore.tryAcquire(sleepTime, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Lease manager is interrupted. Shutting down...", e); Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java deleted file mode 100644 index 2740c177901..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * Lock implementation which also maintains counter. - */ -public final class ActiveLock { - - private ReentrantReadWriteLock lock; - private AtomicInteger count; - - /** - * Use ActiveLock#newInstance to create instance. - * - * @param fairness - if true the lock uses a fair ordering policy, else - * non-fair ordering. - */ - private ActiveLock(boolean fairness) { - this.lock = new ReentrantReadWriteLock(fairness); - this.count = new AtomicInteger(0); - } - - /** - * Creates a new instance of ActiveLock. - * - * @return new ActiveLock - */ - public static ActiveLock newInstance(boolean fairness) { - return new ActiveLock(fairness); - } - - /** - * Acquires read lock. - * - *
<p>
Acquires the read lock if the write lock is not held by - * another thread and returns immediately. - * - *
<p>
If the write lock is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - */ - void readLock() { - lock.readLock().lock(); - } - - /** - * Attempts to release the read lock. - * - *
<p>
If the number of readers is now zero then the lock - * is made available for write lock attempts. - */ - void readUnlock() { - lock.readLock().unlock(); - } - - /** - * Acquires write lock. - * - *
<p>
Acquires the write lock if neither the read nor write lock - * are held by another thread - * and returns immediately, setting the write lock hold count to - * one. - * - *
<p>
If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *
<p>
If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - */ - void writeLock() { - lock.writeLock().lock(); - } - - /** - * Attempts to release the write lock. - * - *
<p>
If the current thread is the holder of this lock then - * the hold count is decremented. If the hold count is now - * zero then the lock is released. - */ - void writeUnlock() { - lock.writeLock().unlock(); - } - - /** - * Increment the active count of the lock. - */ - void incrementActiveCount() { - count.incrementAndGet(); - } - - /** - * Decrement the active count of the lock. - */ - void decrementActiveCount() { - count.decrementAndGet(); - } - - /** - * Returns the active count on the lock. - * - * @return Number of active leases on the lock. - */ - int getActiveLockCount() { - return count.get(); - } - - /** - * Returns the number of reentrant read holds on this lock by the current - * thread. - * - * @return the number of holds on the read lock by the current thread, - * or zero if the read lock is not held by the current thread - */ - int getReadHoldCount() { - return lock.getReadHoldCount(); - } - - /** - * Returns the number of reentrant write holds on this lock by the current - * thread. - * - * @return the number of holds on the write lock by the current thread, - * or zero if the write lock is not held by the current thread - */ - int getWriteHoldCount() { - return lock.getWriteHoldCount(); - } - - /** - * Queries if the write lock is held by the current thread. - * - * @return {@code true} if the current thread holds the write lock and - * {@code false} otherwise - */ - boolean isWriteLockedByCurrentThread() { - return lock.isWriteLockedByCurrentThread(); - } - - /** - * Resets the active count on the lock. - */ - void resetCounter() { - count.set(0); - } - - @Override - public String toString() { - return lock.toString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java deleted file mode 100644 index 1cbe758736a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.impl.GenericObjectPool; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Consumer; - -/** - * Manages the locks on a given resource. A new lock is created for each - * and every unique resource. Uniqueness of resource depends on the - * {@code equals} implementation of it. - */ -public class LockManager { - - private static final Logger LOG = LoggerFactory.getLogger(LockManager.class); - - private final Map activeLocks = new ConcurrentHashMap<>(); - private final GenericObjectPool lockPool; - - /** - * Creates new LockManager instance with the given Configuration.and uses - * non-fair mode for locks. - * - * @param conf Configuration object - */ - public LockManager(final ConfigurationSource conf) { - this(conf, false); - } - - - /** - * Creates new LockManager instance with the given Configuration. - * - * @param conf Configuration object - * @param fair - true to use fair lock ordering, else non-fair lock ordering. - */ - public LockManager(final ConfigurationSource conf, boolean fair) { - lockPool = - new GenericObjectPool<>(new PooledLockFactory(fair)); - lockPool.setMaxTotal(-1); - } - - /** - * Acquires the lock on given resource. - * - *
<p>
If the lock is not available then the current thread becomes - * disabled for thread scheduling purposes and lies dormant until the - * lock has been acquired. - * - * @param resource on which the lock has to be acquired - * @deprecated Use {@link LockManager#writeLock} instead - */ - public void lock(final R resource) { - writeLock(resource); - } - - /** - * Releases the lock on given resource. - * - * @param resource for which the lock has to be released - * @deprecated Use {@link LockManager#writeUnlock} instead - */ - public void unlock(final R resource) { - writeUnlock(resource); - } - - /** - * Acquires the read lock on given resource. - * - *
<p>
Acquires the read lock on resource if the write lock is not held by - * another thread and returns immediately. - * - *
<p>
If the write lock on resource is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - * - * @param resource on which the read lock has to be acquired - */ - public void readLock(final R resource) { - acquire(resource, ActiveLock::readLock); - } - - /** - * Releases the read lock on given resource. - * - * @param resource for which the read lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void readUnlock(final R resource) throws IllegalMonitorStateException { - release(resource, ActiveLock::readUnlock); - } - - /** - * Acquires the write lock on given resource. - * - *
<p>
Acquires the write lock on resource if neither the read nor write lock - * are held by another thread and returns immediately. - * - *
<p>
If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *
<p>
If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - * - * @param resource on which the lock has to be acquired - */ - public void writeLock(final R resource) { - acquire(resource, ActiveLock::writeLock); - } - - /** - * Releases the write lock on given resource. - * - * @param resource for which the lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void writeUnlock(final R resource) - throws IllegalMonitorStateException { - release(resource, ActiveLock::writeUnlock); - } - - /** - * Acquires the lock on given resource using the provided lock function. - * - * @param resource on which the lock has to be acquired - * @param lockFn function to acquire the lock - */ - private void acquire(final R resource, final Consumer lockFn) { - lockFn.accept(getLockForLocking(resource)); - } - - /** - * Releases the lock on given resource using the provided release function. - * - * @param resource for which the lock has to be released - * @param releaseFn function to release the lock - */ - private void release(final R resource, final Consumer releaseFn) { - final ActiveLock lock = getLockForReleasing(resource); - releaseFn.accept(lock); - decrementActiveLockCount(resource); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * on which the lock can be acquired. - * - * @param resource on which the lock has to be acquired - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForLocking(final R resource) { - /* - * While getting a lock object for locking we should - * atomically increment the active count of the lock. - * - * This is to avoid cases where the selected lock could - * be removed from the activeLocks map and returned to - * the object pool. - */ - return activeLocks.compute(resource, (k, v) -> { - final ActiveLock lock; - try { - if (v == null) { - lock = lockPool.borrowObject(); - } else { - lock = v; - } - lock.incrementActiveCount(); - } catch (Exception ex) { - LOG.error("Unable to obtain lock.", ex); - throw new RuntimeException(ex); - } - return lock; - }); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * for which the lock has to be released. - * - * @param resource for which the lock has to be released - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForReleasing(final R resource) { - if (activeLocks.containsKey(resource)) { - return activeLocks.get(resource); - } - // Someone is releasing a lock which was never acquired. - LOG.error("Trying to release the lock on {}, which was never acquired.", - resource); - throw new IllegalMonitorStateException("Releasing lock on resource " - + resource + " without acquiring lock"); - } - - /** - * Decrements the active lock count and returns the {@link ActiveLock} - * object to pool if the active count is 0. - * - * @param resource resource to which the ActiveLock is associated - */ - private void decrementActiveLockCount(final R resource) { - activeLocks.computeIfPresent(resource, (k, v) -> { - v.decrementActiveCount(); - if (v.getActiveLockCount() != 0) { - return v; - } - lockPool.returnObject(v); - return null; - }); - } - - /** - * Returns the number of reentrant read holds on this lock by the current - * thread on a given resource. 
- * - * @param resource for which the read lock hold count has to be returned - * @return the number of holds on the read lock by the current thread, - * or zero if the read lock is not held by the current thread - */ - public int getReadHoldCount(final R resource) { - ActiveLock activeLock = activeLocks.get(resource); - if (activeLock != null) { - return activeLock.getReadHoldCount(); - } - return 0; - } - - /** - * Returns the number of reentrant write holds on this lock by the current - * thread on a given resource. - * - * @param resource for which the write lock hold count has to be returned - * @return the number of holds on the write lock by the current thread, - * or zero if the write lock is not held by the current thread - */ - public int getWriteHoldCount(final R resource) { - ActiveLock activeLock = activeLocks.get(resource); - if (activeLock != null) { - return activeLock.getWriteHoldCount(); - } - return 0; - } - - /** - * Queries if the write lock is held by the current thread on a given - * resource. - * - * @param resource for which the query has to be returned - * @return {@code true} if the current thread holds the write lock and - * {@code false} otherwise - */ - public boolean isWriteLockedByCurrentThread(final R resource) { - ActiveLock activeLock = activeLocks.get(resource); - if (activeLock != null) { - return activeLock.isWriteLockedByCurrentThread(); - } - return false; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java deleted file mode 100644 index 1e3ba05a3a2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.BasePooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; - -/** - * Pool factory to create {@code ActiveLock} instances. - */ -public class PooledLockFactory extends BasePooledObjectFactory { - - private boolean fairness; - - PooledLockFactory(boolean fair) { - this.fairness = fair; - } - @Override - public ActiveLock create() throws Exception { - return ActiveLock.newInstance(fairness); - } - - @Override - public PooledObject wrap(ActiveLock activeLock) { - return new DefaultPooledObject<>(activeLock); - } - - @Override - public void activateObject(PooledObject pooledObject) { - pooledObject.getObject().resetCounter(); - } -} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 048af241a33..47067de5fed 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1298,15 +1298,6 @@ see ozone.scm.heartbeat.thread.interval before changing this value. - - ozone.trace.enabled - false - OZONE, DEBUG - - Setting this flag to true dumps the HTTP request/ response in - the logs. Very useful when debugging REST protocol. - - ozone.key.preallocation.max.blocks @@ -3440,6 +3431,14 @@ Timeout for the request submitted directly to Ratis in datanode. + + hdds.datanode.slow.op.warning.threshold + OZONE, DATANODE, PERFORMANCE + 500ms + + Thresholds for printing slow-operation audit logs. + + ozone.om.keyname.character.check.enabled OZONE, OM diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java index fa5e9e60abe..44f08176267 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java @@ -43,7 +43,6 @@ public class TestReplicationConfig { private static final int MB = 1024 * 1024; - private static final int KB = 1024; //NOTE: if a new chunkSize is used/added in the parameters other than KB or MB // please revisit the method createECDescriptor, to handle the new chunkSize. 
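(Editorial aside, not part of the patch.) The ozone-default.xml hunk above adds a new `hdds.datanode.slow.op.warning.threshold` property with a 500ms default, which the HddsDispatcher changes later in this patch read via `getTimeDuration`. A minimal, self-contained sketch of that lookup pattern, assuming a plain OzoneConfiguration; the class name, the literal key string, and the sample latency are illustrative only:

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/** Sketch: resolve the slow-op threshold and flag an operation that exceeds it. */
public final class SlowOpThresholdSketch {

  private SlowOpThresholdSketch() {
  }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Key and 500ms fallback mirror the ozone-default.xml entry above; the real
    // dispatcher resolves both through HddsConfigKeys constants instead.
    long thresholdMs = conf.getTimeDuration(
        "hdds.datanode.slow.op.warning.threshold", 500, TimeUnit.MILLISECONDS);

    long opLatencyMs = 750; // hypothetical measured latency of one container op
    if (opLatencyMs >= thresholdMs) {
      System.out.println("Slow op: " + opLatencyMs + "ms >= " + thresholdMs + "ms");
    }
  }
}
```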
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java index 26c861dc68b..76b6a0db89b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java @@ -22,10 +22,6 @@ */ public final class MockSpaceUsageSource { - public static SpaceUsageSource zero() { - return fixed(0, 0); - } - public static SpaceUsageSource unlimited() { return fixed(Long.MAX_VALUE, Long.MAX_VALUE); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java index 9c701ca1fc7..674c1233dee 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java @@ -20,6 +20,7 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.Builder; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.mockito.stubbing.Answer; import java.io.File; @@ -30,11 +31,10 @@ import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.newBuilder; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -44,8 +44,8 @@ */ public class TestCachingSpaceUsageSource { - private static final File DIR = - getTestDir(TestCachingSpaceUsageSource.class.getSimpleName()); + @TempDir + private static File dir; @Test public void providesInitialValueUntilStarted() { @@ -156,7 +156,7 @@ private static Builder paramsBuilder(AtomicLong savedValue) { } private static Builder paramsBuilder() { - return newBuilder(DIR) + return newBuilder(dir) .withSource(MockSpaceUsageSource.fixed(10000, 1000)) .withRefresh(Duration.ofMinutes(5)); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java index a87f3fad25e..8363f8b41b6 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java @@ -17,16 +17,13 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.util.Shell; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.ozone.OzoneConsts.KB; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -40,18 +37,12 @@ */ public 
class TestDU { - private static final File DIR = getTestDir(TestDU.class.getSimpleName()); + @TempDir + private File dir; @BeforeEach void setUp() { assumeFalse(Shell.WINDOWS); - FileUtil.fullyDelete(DIR); - assertTrue(DIR.mkdirs()); - } - - @AfterEach - void tearDown() throws IOException { - FileUtil.fullyDelete(DIR); } static void createFile(File newFile, int size) throws IOException { @@ -80,7 +71,7 @@ static void createFile(File newFile, int size) throws IOException { @Test void testGetUsed() throws Exception { final long writtenSize = 32 * KB; - File file = new File(DIR, "data"); + File file = new File(dir, "data"); createFile(file, (int) writtenSize); SpaceUsageSource du = new DU(file); @@ -91,9 +82,9 @@ void testGetUsed() throws Exception { @Test void testExcludePattern() throws IOException { - createFile(new File(DIR, "include.txt"), (int) (4 * KB)); - createFile(new File(DIR, "exclude.tmp"), (int) (100 * KB)); - SpaceUsageSource du = new DU(DIR, "*.tmp"); + createFile(new File(dir, "include.txt"), (int) (4 * KB)); + createFile(new File(dir, "exclude.tmp"), (int) (100 * KB)); + SpaceUsageSource du = new DU(dir, "*.tmp"); long usedSpace = du.getUsedSpace(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java index 6e603f8ff0b..4e8379c9498 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -38,8 +38,7 @@ public void testCreateViaConfig() { } @Test - public void testParams() { - File dir = getTestDir(getClass().getSimpleName()); + public void testParams(@TempDir File dir) { Duration refresh = Duration.ofHours(1); OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java index 85b21df86b9..04cfd420317 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java @@ -17,45 +17,30 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.fs.FileUtil; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; import static org.apache.hadoop.hdds.fs.TestDU.createFile; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for {@link DedicatedDiskSpaceUsage}. 
*/ class TestDedicatedDiskSpaceUsage { - private static final File DIR = - getTestDir(TestDedicatedDiskSpaceUsage.class.getSimpleName()); + @TempDir + private File dir; private static final int FILE_SIZE = 1024; - @BeforeEach - void setUp() { - FileUtil.fullyDelete(DIR); - assertTrue(DIR.mkdirs()); - } - - @AfterEach - void tearDown() { - FileUtil.fullyDelete(DIR); - } - @Test void testGetUsed() throws IOException { - File file = new File(DIR, "data"); + File file = new File(dir, "data"); createFile(file, FILE_SIZE); - SpaceUsageSource subject = new DedicatedDiskSpaceUsage(DIR); + SpaceUsageSource subject = new DedicatedDiskSpaceUsage(dir); // condition comes from TestDFCachingGetSpaceUsed in Hadoop Common assertThat(subject.getUsedSpace()).isGreaterThanOrEqualTo(FILE_SIZE - 20); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java index 0142ee56af7..8391976da09 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java @@ -23,8 +23,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import static org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -40,10 +41,9 @@ public void testCreateViaConfig() { } @Test - public void testParams() { + public void testParams(@TempDir File dir) { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(configKeyForRefreshPeriod(), "2m"); - File dir = getTestDir(getClass().getSimpleName()); SpaceUsageCheckParams params = new DedicatedDiskSpaceUsageFactory() .setConfiguration(conf) diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java index f35e6975082..6a901d6cbc4 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java @@ -18,10 +18,9 @@ package org.apache.hadoop.hdds.fs; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.fs.FileUtil; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -30,7 +29,6 @@ import java.time.Instant; import java.util.OptionalLong; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.apache.ozone.test.GenericTestUtils.waitFor; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -41,8 +39,8 @@ */ public class TestSaveSpaceUsageToFile { - private static final File DIR = - getTestDir(TestSaveSpaceUsageToFile.class.getSimpleName()); + @TempDir + private File dir; private static final Duration LONG_EXPIRY = Duration.ofMinutes(15); @@ -53,14 +51,7 @@ public class TestSaveSpaceUsageToFile { @BeforeEach public void setup() { - FileUtil.fullyDelete(DIR); - 
assertTrue(DIR.mkdirs()); - file = new File(DIR, "space_usage.txt"); - } - - @AfterEach - public void cleanup() { - FileUtil.fullyDelete(DIR); + file = new File(dir, "space_usage.txt"); } @Test diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java index 4030f6e46d4..b05deaa0d66 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java @@ -29,7 +29,6 @@ import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test for {@link DatanodeDetails}. @@ -50,14 +49,10 @@ void protoIncludesNewPortsOnlyForV1() { } public static void assertPorts(HddsProtos.DatanodeDetailsProto dn, - Set expectedPorts) { + Set expectedPorts) throws IllegalArgumentException { assertEquals(expectedPorts.size(), dn.getPortsCount()); for (HddsProtos.Port port : dn.getPortsList()) { - try { - assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName())); - } catch (IllegalArgumentException e) { - fail("Unknown port: " + port.getName()); - } + assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName())); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java index 3bf2ef40231..f022a6030c0 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java @@ -112,6 +112,7 @@ void testJsonOutput() throws IOException { assertEquals(0, stats.get("EMPTY").longValue()); assertEquals(0, stats.get("OPEN_UNHEALTHY").longValue()); assertEquals(0, stats.get("QUASI_CLOSED_STUCK").longValue()); + assertEquals(0, stats.get("OPEN_WITHOUT_PIPELINE").longValue()); JsonNode samples = json.get("samples"); assertEquals(ARRAY, samples.get("UNDER_REPLICATED").getNodeType()); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index 67b210a05eb..f737ec23a0c 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -52,7 +52,7 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java index 8d5c56fc0d5..39884fcd5a9 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java +++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java @@ -51,8 +51,7 @@ public void testInitTracing() { Configuration config = Configuration.fromEnv("testInitTracing"); JaegerTracer tracer = config.getTracerBuilder().build(); GlobalTracer.registerIfAbsent(tracer); - try (AutoCloseable scope = TracingUtil - .createActivatedSpan("initTracing")) { + try (AutoCloseable ignored = TracingUtil.createActivatedSpan("initTracing")) { exportCurrentSpan(); } catch (Exception e) { fail("Should not get exception"); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java index 253cf7dfe47..c73dfdea03a 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java @@ -26,7 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java similarity index 100% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 5b88f5cb300..9567fa2c281 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import java.util.zip.Checksum; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -59,11 +59,9 @@ void testCorrectness() { checkBytes("hello world!".getBytes(StandardCharsets.UTF_8)); - final Random random = new Random(); - final byte[] bytes = new byte[1 << 10]; + final int len = 1 << 10; for (int i = 0; i < 1000; i++) { - random.nextBytes(bytes); - checkBytes(bytes, random.nextInt(bytes.length)); + checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len)); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java index 414754092f9..3d6d38f3d3b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java @@ -37,7 +37,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertFalse; -import static 
org.junit.jupiter.api.Assertions.fail; /** * Test {@link ChunkBuffer} implementations. @@ -49,7 +48,7 @@ private static int nextInt(int n) { @Test @Timeout(1) - public void testImplWithByteBuffer() { + void testImplWithByteBuffer() throws IOException { runTestImplWithByteBuffer(1); runTestImplWithByteBuffer(1 << 10); for (int i = 0; i < 10; i++) { @@ -57,7 +56,7 @@ public void testImplWithByteBuffer() { } } - private static void runTestImplWithByteBuffer(int n) { + private static void runTestImplWithByteBuffer(int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); runTestImpl(expected, 0, ChunkBuffer.allocate(n)); @@ -65,7 +64,7 @@ private static void runTestImplWithByteBuffer(int n) { @Test @Timeout(1) - public void testIncrementalChunkBuffer() { + void testIncrementalChunkBuffer() throws IOException { runTestIncrementalChunkBuffer(1, 1); runTestIncrementalChunkBuffer(4, 8); runTestIncrementalChunkBuffer(16, 1 << 10); @@ -76,7 +75,7 @@ public void testIncrementalChunkBuffer() { } } - private static void runTestIncrementalChunkBuffer(int increment, int n) { + private static void runTestIncrementalChunkBuffer(int increment, int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); runTestImpl(expected, increment, @@ -85,7 +84,7 @@ private static void runTestIncrementalChunkBuffer(int increment, int n) { @Test @Timeout(1) - public void testImplWithList() { + void testImplWithList() throws IOException { runTestImplWithList(4, 8); runTestImplWithList(16, 1 << 10); for (int i = 0; i < 10; i++) { @@ -95,7 +94,7 @@ public void testImplWithList() { } } - private static void runTestImplWithList(int count, int n) { + private static void runTestImplWithList(int count, int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); @@ -117,7 +116,7 @@ private static void runTestImplWithList(int count, int n) { runTestImpl(expected, -1, impl); } - private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) { + private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) throws IOException { final int n = expected.length; System.out.println("n=" + n + ", impl=" + impl); @@ -207,33 +206,13 @@ private static void assertToByteString( "offset=" + offset + ", length=" + length); } - private static void assertWrite(byte[] expected, ChunkBuffer impl) { + private static void assertWrite(byte[] expected, ChunkBuffer impl) throws IOException { impl.rewind(); assertEquals(0, impl.position()); ByteArrayOutputStream output = new ByteArrayOutputStream(expected.length); - - try { - impl.writeTo(new MockGatheringChannel(Channels.newChannel(output))); - } catch (IOException e) { - fail("Unexpected error: " + e); - } - + impl.writeTo(new MockGatheringChannel(Channels.newChannel(output))); assertArrayEquals(expected, output.toByteArray()); assertFalse(impl.hasRemaining()); } - - private static String toString(byte[] arr) { - if (arr == null || arr.length == 0) { - return ""; - } - - StringBuilder sb = new StringBuilder(); - for (byte b : arr) { - sb.append(Character.forDigit((b >> 4) & 0xF, 16)) - .append(Character.forDigit((b & 0xF), 16)) - .append(" "); - } - return sb.deleteCharAt(sb.length() - 1).toString(); - } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 
c30f788397a..92754c9fa69 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -442,11 +442,9 @@ public static Builder newGetBlockRequestBuilder( /** * Verify the response against the request. * - * @param request - Request * @param response - Response */ - public static void verifyGetBlock(ContainerCommandRequestProto request, - ContainerCommandResponseProto response, int expectedChunksCount) { + public static void verifyGetBlock(ContainerCommandResponseProto response, int expectedChunksCount) { assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); assertEquals(expectedChunksCount, response.getGetBlock().getBlockData().getChunksCount()); @@ -495,23 +493,6 @@ public static ContainerCommandRequestProto getCloseContainer( Pipeline pipeline, long containerID) throws IOException { return getCloseContainer(pipeline, containerID, null); } - /** - * Returns a simple request without traceId. - * @param pipeline - pipeline - * @param containerID - ID of the container. - * @return ContainerCommandRequestProto without traceId. - */ - public static ContainerCommandRequestProto getRequestWithoutTraceId( - Pipeline pipeline, long containerID) throws IOException { - Preconditions.checkNotNull(pipeline); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CloseContainer) - .setContainerID(containerID) - .setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()) - .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) - .build(); - } /** * Returns a delete container request. @@ -535,14 +516,6 @@ public static ContainerCommandRequestProto getDeleteContainer( .build(); } - private static void sleep(long milliseconds) { - try { - Thread.sleep(milliseconds); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - public static BlockID getTestBlockID(long containerID) { return new BlockID(containerID, UniqueId.next()); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java deleted file mode 100644 index 62b8e6ac50b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java +++ /dev/null @@ -1,208 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.hadoop.util.Daemon; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Test-cases to test LockManager. - */ -class TestLockManager { - - @Test - @Timeout(1) - void testWriteLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.writeLock("/resourceOne"); - // This should work, as they are different resource. - assertDoesNotThrow(() -> manager.writeLock("/resourceTwo")); - manager.writeUnlock("/resourceOne"); - manager.writeUnlock("/resourceTwo"); - } - - @Test - void testWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - assertTrue(gotLock.get()); - } - - @Test - @Timeout(1) - void testReadLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.readLock("/resourceOne"); - assertDoesNotThrow(() -> manager.readLock("/resourceTwo")); - manager.readUnlock("/resourceOne"); - manager.readUnlock("/resourceTwo"); - } - - @Test - void testReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the new thread is trying to get read lock, it should work. 
- assertTrue(gotLock.get()); - manager.readUnlock("/resourceOne"); - } - - @Test - void testWriteReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get read lock on same object, - // it will wait. - assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - assertTrue(gotLock.get()); - } - - @Test - void testReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - assertTrue(gotLock.get()); - } - - @Test - void testMultiReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - //We have only released one read lock, we still hold another read lock. 
- Thread.sleep(100); - assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - assertTrue(gotLock.get()); - } - - @Test - void testConcurrentWriteLockWithDifferentResource() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final int count = 100; - final LockManager manager = new LockManager<>(conf); - final int sleep = 10; - final AtomicInteger done = new AtomicInteger(); - for (int i = 0; i < count; i++) { - final Integer id = i; - Daemon d1 = new Daemon(() -> { - try { - manager.writeLock(id); - Thread.sleep(sleep); - } catch (InterruptedException e) { - e.printStackTrace(); - } finally { - manager.writeUnlock(id); - } - done.getAndIncrement(); - }); - d1.setName("Locker-" + i); - d1.start(); - } - GenericTestUtils.waitFor(() -> done.get() == count, 100, - 10 * count * sleep); - assertEquals(count, done.get()); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java deleted file mode 100644 index cf4eb657f29..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; -/* - This package contains the lock related test classes. - */ diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java index a40eecc62b7..028a82b4fc6 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java @@ -169,17 +169,12 @@ private LayoutVersionManager getMockLvm(int mlv, int slv) { * Mock Interface. */ interface MockInterface { - String mockMethod(); } /** * Mock Impl v1. 
*/ static class MockClassV1 implements MockInterface { - @Override - public String mockMethod() { - return getClass().getSimpleName(); - } } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/util/NativeCRC32Wrapper.java similarity index 100% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/util/NativeCRC32Wrapper.java diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index e372c4b3558..fb72f93570b 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java index 7ac5d885e39..d4bfb360b9c 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.conf; -import java.time.temporal.TemporalUnit; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.conf.TimeDurationUtil.ParsedTimeDuration; @@ -53,10 +52,6 @@ default void setTimeDuration(String name, long value, TimeUnit unit) { set(name, value + ParsedTimeDuration.unitFor(unit).suffix()); } - default void setTimeDuration(String name, long value, TemporalUnit unit) { - set(name, value + ParsedTimeDuration.unitFor(unit).suffix()); - } - default void setStorageSize(String name, long value, StorageUnit unit) { set(name, value + unit.getShortName()); } diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java index c8775fbf94b..c9b8a1d2c17 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java @@ -207,14 +207,5 @@ static ParsedTimeDuration unitFor(TimeUnit unit) { } return null; } - - static ParsedTimeDuration unitFor(TemporalUnit unit) { - for (ParsedTimeDuration ptd : values()) { - if (ptd.temporalUnit() == unit) { - return ptd; - } - } - return null; - } } } diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 058274e5668..13973c871e6 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone HDDS Container Service jar - false diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 3cd0477ffd7..f59622cb0fa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -82,6 +82,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED; +import static 
org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_STREAMS_LIMIT_KEY; import static org.apache.hadoop.security.UserGroupInformation.getCurrentUser; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX; import static org.apache.hadoop.util.ExitUtil.terminate; @@ -291,7 +292,9 @@ public void start() { .register(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX, this::reconfigBlockDeleteThreadMax) .register(OZONE_BLOCK_DELETING_SERVICE_WORKERS, - this::reconfigDeletingServiceWorkers); + this::reconfigDeletingServiceWorkers) + .register(REPLICATION_STREAMS_LIMIT_KEY, + this::reconfigReplicationStreamsLimit); datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf, dnCertClient, secretKeyClient, this::terminateDatanode, dnCRLStore, @@ -667,4 +670,12 @@ private String reconfigDeletingServiceWorkers(String value) { .setPoolSize(Integer.parseInt(value)); return value; } + + private String reconfigReplicationStreamsLimit(String value) { + getConf().set(REPLICATION_STREAMS_LIMIT_KEY, value); + + getDatanodeStateMachine().getContainer().getReplicationServer() + .setPoolSize(Integer.parseInt(value)); + return value; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index 94b51223228..3c202ba60a8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.yaml.snakeyaml.Yaml; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index ddb21937710..f20615d23f8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -75,7 +75,9 @@ import java.util.Optional; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; @@ -101,6 +103,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { private String clusterId; private ContainerMetrics metrics; private final TokenVerifier tokenVerifier; + private long slowOpThresholdMs; /** * Constructs an OzoneContainer that receives calls from @@ -121,6 +124,7 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet, HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT); this.tokenVerifier = tokenVerifier != null ? 
tokenVerifier : new NoopTokenVerifier(); + this.slowOpThresholdMs = getSlowOpThresholdMs(conf); protocolMetrics = new ProtocolMessageMetrics<>( @@ -196,6 +200,7 @@ private ContainerCommandResponseProto dispatchRequest( AuditAction action = getAuditAction(msg.getCmdType()); EventType eventType = getEventType(msg); Map params = getAuditParams(msg); + PerformanceStringBuilder perf = new PerformanceStringBuilder(); ContainerType containerType; ContainerCommandResponseProto responseProto = null; @@ -326,10 +331,11 @@ private ContainerCommandResponseProto dispatchRequest( audit(action, eventType, params, AuditEventStatus.FAILURE, ex); return ContainerUtils.logAndReturnError(LOG, ex, msg); } + perf.appendPreOpLatencyMs(Time.monotonicNow() - startTime); responseProto = handler.handle(msg, container, dispatcherContext); + long oPLatencyMS = Time.monotonicNow() - startTime; if (responseProto != null) { - metrics.incContainerOpsLatencies(cmdType, - Time.monotonicNow() - startTime); + metrics.incContainerOpsLatencies(cmdType, oPLatencyMS); // If the request is of Write Type and the container operation // is unsuccessful, it implies the applyTransaction on the container @@ -402,6 +408,8 @@ private ContainerCommandResponseProto dispatchRequest( audit(action, eventType, params, AuditEventStatus.FAILURE, new Exception(responseProto.getMessage())); } + perf.appendOpLatencyMs(oPLatencyMS); + performanceAudit(action, params, perf, oPLatencyMS); return responseProto; } else { @@ -412,6 +420,13 @@ private ContainerCommandResponseProto dispatchRequest( } } + private long getSlowOpThresholdMs(ConfigurationSource config) { + return config.getTimeDuration( + HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY, + HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT, + TimeUnit.MILLISECONDS); + } + private void updateBCSID(Container container, DispatcherContext dispatcherContext, Type cmdType) { if (dispatcherContext != null && (cmdType == Type.PutBlock @@ -682,6 +697,26 @@ private void audit(AuditAction action, EventType eventType, } } + private void performanceAudit(AuditAction action, Map params, + PerformanceStringBuilder performance, long opLatencyMs) { + if (isOperationSlow(opLatencyMs)) { + AuditMessage msg = + buildAuditMessageForPerformance(action, params, performance); + AUDIT.logPerformance(msg); + } + } + + public AuditMessage buildAuditMessageForPerformance(AuditAction op, + Map auditMap, PerformanceStringBuilder performance) { + return new AuditMessage.Builder() + .setUser(null) + .atIp(null) + .forOperation(op) + .withParams(auditMap) + .setPerformance(performance) + .build(); + } + //TODO: use GRPC to fetch user and ip details @Override public AuditMessage buildAuditMessageForSuccess(AuditAction op, @@ -846,6 +881,8 @@ private static Map getAuditParams( case ReadChunk: auditParams.put("blockData", BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); + auditParams.put("blockDataSize", + String.valueOf(msg.getReadChunk().getChunkData().getLen())); return auditParams; case DeleteChunk: @@ -858,6 +895,8 @@ private static Map getAuditParams( auditParams.put("blockData", BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) .toString()); + auditParams.put("blockDataSize", + String.valueOf(msg.getWriteChunk().getChunkData().getLen())); return auditParams; case ListChunk: @@ -874,6 +913,8 @@ private static Map getAuditParams( auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); + 
auditParams.put("blockDataSize", + String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen())); } catch (IOException ex) { if (LOG.isTraceEnabled()) { LOG.trace("Encountered error parsing BlockData from protobuf: " @@ -911,4 +952,7 @@ private static Map getAuditParams( } + private boolean isOperationSlow(long opLatencyMs) { + return opLatencyMs >= slowOpThresholdMs; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 13f7ad61502..6bbf8e47946 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -893,18 +893,21 @@ public Map getCommandStatusMap() { } /** - * Updates status of a pending status command. + * Updates the command status of a pending command. * @param cmdId command id * @param cmdStatusUpdater Consumer to update command status. - * @return true if command status updated successfully else false. + * @return true if command status updated successfully else if the command + * associated with the command id does not exist in the context. */ public boolean updateCommandStatus(Long cmdId, Consumer cmdStatusUpdater) { - if (cmdStatusMap.containsKey(cmdId)) { - cmdStatusUpdater.accept(cmdStatusMap.get(cmdId)); - return true; - } - return false; + CommandStatus updatedCommandStatus = cmdStatusMap.computeIfPresent(cmdId, + (key, value) -> { + cmdStatusUpdater.accept(value); + return value; + } + ); + return updatedCommandStatus != null; } public void configureHeartbeatFrequency() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 6d119b17b3b..fcc611ea3f1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -25,12 +25,11 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingDeque; @@ -126,6 +125,27 @@ public final class XceiverServerRatis implements XceiverServerSpi { private static final Logger LOG = LoggerFactory .getLogger(XceiverServerRatis.class); + + private static class ActivePipelineContext { + /** The current datanode is the current leader of the pipeline. */ + private final boolean isPipelineLeader; + /** The heartbeat containing pipeline close action has been triggered. 
*/ + private final boolean isPendingClose; + + ActivePipelineContext(boolean isPipelineLeader, boolean isPendingClose) { + this.isPipelineLeader = isPipelineLeader; + this.isPendingClose = isPendingClose; + } + + public boolean isPipelineLeader() { + return isPipelineLeader; + } + + public boolean isPendingClose() { + return isPendingClose; + } + } + private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); private static final List DEFAULT_PRIORITY_LIST = new ArrayList<>( @@ -151,11 +171,8 @@ private static long nextCallId() { private final ConfigurationSource conf; // TODO: Remove the gids set when Ratis supports an api to query active // pipelines - private final Set raftGids = ConcurrentHashMap.newKeySet(); + private final ConcurrentMap activePipelines = new ConcurrentHashMap<>(); private final RaftPeerId raftPeerId; - // pipelines for which I am the leader - private final Map groupLeaderMap = - new ConcurrentHashMap<>(); // Timeout used while calling submitRequest directly. private final long requestTimeout; private final boolean shouldDeleteRatisLogDirectory; @@ -731,11 +748,11 @@ private void handlePipelineFailure(RaftGroupId groupId, } triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_FAILED, false); + ClosePipelineInfo.Reason.PIPELINE_FAILED); } private void triggerPipelineClose(RaftGroupId groupId, String detail, - ClosePipelineInfo.Reason reasonCode, boolean triggerHB) { + ClosePipelineInfo.Reason reasonCode) { PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid()); ClosePipelineInfo.Builder closePipelineInfo = ClosePipelineInfo.newBuilder() @@ -749,9 +766,12 @@ private void triggerPipelineClose(RaftGroupId groupId, String detail, .build(); if (context != null) { context.addPipelineActionIfAbsent(action); - // wait for the next HB timeout or right away? 
- if (triggerHB) { + if (!activePipelines.get(groupId).isPendingClose()) { + // if the pipeline close action has not been triggered before, we need to trigger pipeline close immediately to + // prevent SCM from allocating blocks on the failed pipeline context.getParent().triggerHeartbeat(); + activePipelines.computeIfPresent(groupId, + (key, value) -> new ActivePipelineContext(value.isPipelineLeader(), true)); } } LOG.error("pipeline Action {} on pipeline {}.Reason : {}", @@ -761,7 +781,7 @@ private void triggerPipelineClose(RaftGroupId groupId, String detail, @Override public boolean isExist(HddsProtos.PipelineID pipelineId) { - return raftGids.contains( + return activePipelines.containsKey( RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId())); } @@ -785,9 +805,11 @@ public List getPipelineReport() { for (RaftGroupId groupId : gids) { HddsProtos.PipelineID pipelineID = PipelineID .valueOf(groupId.getUuid()).getProtobuf(); + boolean isLeader = activePipelines.getOrDefault(groupId, + new ActivePipelineContext(false, false)).isPipelineLeader(); reports.add(PipelineReport.newBuilder() .setPipelineID(pipelineID) - .setIsLeader(groupLeaderMap.getOrDefault(groupId, Boolean.FALSE)) + .setIsLeader(isLeader) .setBytesWritten(calculatePipelineBytesWritten(pipelineID)) .build()); } @@ -877,7 +899,7 @@ void handleApplyTransactionFailure(RaftGroupId groupId, "Ratis Transaction failure in datanode " + dnId + " with role " + role + " .Triggering pipeline close action."; triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED, true); + ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED); } /** * The fact that the snapshot contents cannot be used to actually catch up @@ -913,7 +935,7 @@ public void handleNodeLogFailure(RaftGroupId groupId, Throwable t) { : t.getMessage(); triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED, true); + ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED); } public long getMinReplicatedIndex(PipelineID pipelineID) throws IOException { @@ -930,13 +952,12 @@ public Collection getRaftPeersInPipeline(PipelineID pipelineId) throws } public void notifyGroupRemove(RaftGroupId gid) { - raftGids.remove(gid); - // Remove any entries for group leader map - groupLeaderMap.remove(gid); + // Remove Group ID entry from the active pipeline map + activePipelines.remove(gid); } void notifyGroupAdd(RaftGroupId gid) { - raftGids.add(gid); + activePipelines.put(gid, new ActivePipelineContext(false, false)); sendPipelineReport(); } @@ -946,7 +967,9 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId, "leaderId: {}", groupMemberId.getGroupId(), raftPeerId1); // Save the reported leader to be sent with the report to SCM boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1); - groupLeaderMap.put(groupMemberId.getGroupId(), leaderForGroup); + activePipelines.compute(groupMemberId.getGroupId(), + (key, value) -> value == null ? 
new ActivePipelineContext(leaderForGroup, false) : + new ActivePipelineContext(leaderForGroup, value.isPendingClose())); if (context != null && leaderForGroup) { // Publish new report from leader sendPipelineReport(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java index 1e0d2ecd3ad..0a2375b4f44 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java @@ -32,7 +32,7 @@ import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater .newUpdater; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 44bd4cf19a4..d8ba919cefb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -41,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.initPerDiskDBStore; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 44ae1c0e795..d9d5a667b30 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.io.File; import java.io.IOException; import java.nio.file.Files; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java index eddc77f18e4..4917810bd97 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.volume; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -50,7 +50,7 @@ import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import 
org.checkerframework.checker.nullness.qual.Nullable; +import jakarta.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java index 2b8b19176ff..991f105d15b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java @@ -28,11 +28,11 @@ import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; import org.apache.hadoop.util.Timer; -import org.checkerframework.checker.nullness.qual.Nullable; +import jakarta.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.HashMap; import java.util.Map; import java.util.Optional; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java index 0d55c084b3d..42e2ed5758e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java @@ -25,7 +25,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index b862f832d76..9dedd65565f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -37,7 +37,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,7 +68,7 @@ public ECContainerOperationClient(ConfigurationSource conf, this(createClientManager(conf, certificateClient)); } - @NotNull + @Nonnull private static XceiverClientManager createClientManager( ConfigurationSource conf, CertificateClient certificateClient) throws IOException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 24e76821f9c..234439a00c2 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -112,6 +112,7 @@ public class ECReconstructionCoordinator implements Closeable { private final ContainerClientMetrics clientMetrics; private final ECReconstructionMetrics metrics; private final StateContext context; + private final OzoneClientConfig ozoneClientConfig; public ECReconstructionCoordinator( ConfigurationSource conf, CertificateClient certificateClient, @@ -125,10 +126,10 @@ public ECReconstructionCoordinator( ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d") .build(); + ozoneClientConfig = conf.getObject(OzoneClientConfig.class); this.ecReconstructExecutor = new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - conf.getObject(OzoneClientConfig.class) - .getEcReconstructStripeReadPoolLimit(), + ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), 60, TimeUnit.SECONDS, new SynchronousQueue<>(), @@ -222,16 +223,15 @@ public void reconstructECContainerGroup(long containerID, private ECBlockOutputStream getECBlockOutputStream( BlockLocationInfo blockLocationInfo, DatanodeDetails datanodeDetails, - ECReplicationConfig repConfig, int replicaIndex, - OzoneClientConfig configuration) throws IOException { + ECReplicationConfig repConfig, int replicaIndex) throws IOException { StreamBufferArgs streamBufferArgs = - StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, configuration); + StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, ozoneClientConfig); return new ECBlockOutputStream( blockLocationInfo.getBlockID(), containerOperationClient.getXceiverClientManager(), containerOperationClient.singleNodePipeline(datanodeDetails, repConfig, replicaIndex), - BufferPool.empty(), configuration, + BufferPool.empty(), ozoneClientConfig, blockLocationInfo.getToken(), clientMetrics, streamBufferArgs); } @@ -277,15 +277,14 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; ByteBuffer[] bufs = new ByteBuffer[toReconstructIndexes.size()]; - OzoneClientConfig configuration = new OzoneClientConfig(); try { for (int i = 0; i < toReconstructIndexes.size(); i++) { int replicaIndex = toReconstructIndexes.get(i); DatanodeDetails datanodeDetails = targetMap.get(replicaIndex); targetBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, - datanodeDetails, repConfig, replicaIndex, - configuration); + datanodeDetails, repConfig, replicaIndex + ); bufs[i] = byteBufferPool.getBuffer(false, repConfig.getEcChunkSize()); // Make sure it's clean. Don't want to reuse the erroneously returned // buffers from the pool. 
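Note on the concurrency pattern in the preceding hunks: the StateContext.updateCommandStatus and XceiverServerRatis.activePipelines changes replace separate containsKey/get/put steps with single ConcurrentHashMap computeIfPresent/compute calls, so the lookup and the update happen atomically and cannot interleave with a concurrent removal of the entry. The sketch below is a minimal, self-contained illustration of that pattern using only the JDK; the Status class and the updateRacy/updateAtomic names are illustrative placeholders, not Ozone APIs.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;

public class AtomicStatusUpdate {

  /** Placeholder for a mutable per-command status object. */
  static final class Status {
    volatile String state = "PENDING";
  }

  private final ConcurrentMap<Long, Status> statusMap = new ConcurrentHashMap<>();

  /**
   * Check-then-act variant: another thread may remove the entry between
   * containsKey() and get(), so the updater can observe null or the update
   * can be applied to an entry that is no longer in the map.
   */
  boolean updateRacy(long id, Consumer<Status> updater) {
    if (statusMap.containsKey(id)) {
      updater.accept(statusMap.get(id)); // entry may already be gone here
      return true;
    }
    return false;
  }

  /**
   * Atomic variant mirroring the computeIfPresent pattern above: the updater
   * runs only while the mapping is present, under the map's internal lock
   * for that key, so concurrent removals cannot interleave with the update.
   */
  boolean updateAtomic(long id, Consumer<Status> updater) {
    Status updated = statusMap.computeIfPresent(id, (key, value) -> {
      updater.accept(value);
      return value; // keep the (mutated) entry in the map
    });
    return updated != null;
  }

  public static void main(String[] args) {
    AtomicStatusUpdate ctx = new AtomicStatusUpdate();
    ctx.statusMap.put(1L, new Status());
    System.out.println(ctx.updateAtomic(1L, s -> s.state = "EXECUTED")); // true
    System.out.println(ctx.updateAtomic(2L, s -> s.state = "EXECUTED")); // false, no entry
  }
}
```

Returning the (possibly mutated) value from the lambda keeps the entry in the map; returning null would remove it, which is how computeIfPresent can also serve as a conditional cleanup.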
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 6e817fdce98..59009ef9dfe 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -629,7 +629,6 @@ ContainerCommandResponseProto handleGetBlock( try { BlockID blockID = BlockID.getFromProtobuf( request.getGetBlock().getBlockID()); - checkContainerIsHealthy(kvContainer, blockID, Type.GetBlock); responseData = blockManager.getBlock(kvContainer, blockID) .getProtoBufMessage(); final long numBytes = responseData.getSerializedSize(); @@ -670,8 +669,6 @@ ContainerCommandResponseProto handleGetCommittedBlockLength( try { BlockID blockID = BlockID .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); - checkContainerIsHealthy(kvContainer, blockID, - Type.GetCommittedBlockLength); BlockUtils.verifyBCSId(kvContainer, blockID); blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID); } catch (StorageContainerException ex) { @@ -758,7 +755,6 @@ ContainerCommandResponseProto handleReadChunk( .getChunkData()); Preconditions.checkNotNull(chunkInfo); - checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk); BlockUtils.verifyBCSId(kvContainer, blockID); if (dispatcherContext == null) { dispatcherContext = DispatcherContext.getHandleReadChunk(); @@ -796,25 +792,6 @@ ContainerCommandResponseProto handleReadChunk( return getReadChunkResponse(request, data, byteBufferToByteString); } - /** - * Throw an exception if the container is unhealthy. - * - * @throws StorageContainerException if the container is unhealthy. - */ - @VisibleForTesting - void checkContainerIsHealthy(KeyValueContainer kvContainer, BlockID blockID, - Type cmd) { - kvContainer.readLock(); - try { - if (kvContainer.getContainerData().getState() == State.UNHEALTHY) { - LOG.warn("{} request {} for UNHEALTHY container {} replica", cmd, - blockID, kvContainer.getContainerData().getContainerID()); - } - } finally { - kvContainer.readUnlock(); - } - } - /** * Handle Delete Chunk operation. Calls ChunkManager to process the request. 
*/ @@ -860,6 +837,7 @@ ContainerCommandResponseProto handleWriteChunk( WriteChunkRequestProto writeChunk = request.getWriteChunk(); BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID()); ContainerProtos.ChunkInfo chunkInfoProto = writeChunk.getChunkData(); + ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); Preconditions.checkNotNull(chunkInfo); @@ -978,7 +956,6 @@ ContainerCommandResponseProto handleGetSmallFile( try { BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock() .getBlockID()); - checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile); BlockData responseData = blockManager.getBlock(kvContainer, blockID); ContainerProtos.ChunkInfo chunkInfoProto = null; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java index 49d54b78c90..6a1d5533cf2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.EnumMap; import java.util.Map; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 36200d890aa..c5a59da537e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -32,14 +32,12 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; import org.rocksdb.InfoLogLevel; -import org.rocksdb.StatsLevel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,9 +47,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; /** * Implementation of the {@link DatanodeStore} interface that contains @@ -119,16 +114,6 @@ public void start(ConfigurationSource config) options.setMaxTotalWalSize(maxWalSize); } - String rocksDbStat = config.getTrimmed( - OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - 
OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - ManagedStatistics statistics = new ManagedStatistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - options.setStatistics(statistics); - } - DatanodeConfiguration dc = config.getObject(DatanodeConfiguration.class); // Config user log files diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java index 51e45335008..84000ba2fb9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java @@ -94,12 +94,16 @@ private void reconcilePartialChunks( LOG.debug("blockData={}, lastChunk={}", blockData.getChunks(), lastChunk.getChunks()); Preconditions.checkState(lastChunk.getChunks().size() == 1); - ContainerProtos.ChunkInfo lastChunkInBlockData = - blockData.getChunks().get(blockData.getChunks().size() - 1); - Preconditions.checkState( - lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen() - == lastChunk.getChunks().get(0).getOffset(), - "chunk offset does not match"); + if (!blockData.getChunks().isEmpty()) { + ContainerProtos.ChunkInfo lastChunkInBlockData = + blockData.getChunks().get(blockData.getChunks().size() - 1); + if (lastChunkInBlockData != null) { + Preconditions.checkState( + lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen() + == lastChunk.getChunks().get(0).getOffset(), + "chunk offset does not match"); + } + } // append last partial chunk to the block data List chunkInfos = @@ -136,7 +140,7 @@ private static boolean shouldAppendLastChunk(boolean endOfBlock, public void putBlockByID(BatchOperation batch, boolean incremental, long localID, BlockData data, KeyValueContainerData containerData, boolean endOfBlock) throws IOException { - if (!incremental && !isPartialChunkList(data)) { + if (!incremental || !isPartialChunkList(data)) { // Case (1) old client: override chunk list. getBlockDataTable().putWithBatch( batch, containerData.getBlockKey(localID), data); @@ -151,14 +155,21 @@ public void putBlockByID(BatchOperation batch, boolean incremental, private void moveLastChunkToBlockData(BatchOperation batch, long localID, BlockData data, KeyValueContainerData containerData) throws IOException { + // if data has no chunks, fetch the last chunk info from lastChunkInfoTable + if (data.getChunks().isEmpty()) { + BlockData lastChunk = getLastChunkInfoTable().get(containerData.getBlockKey(localID)); + if (lastChunk != null) { + reconcilePartialChunks(lastChunk, data); + } + } // if eob or if the last chunk is full, // the 'data' is full so append it to the block table's chunk info // and then remove from lastChunkInfo BlockData blockData = getBlockDataTable().get( containerData.getBlockKey(localID)); if (blockData == null) { - // Case 2.1 if the block did not have full chunks before, - // the block's chunk is what received from client this time. 
+ // Case 2.1 if the block did not have full chunks before, + // the block's chunk is what was received from the client this time, plus the chunks in lastChunkInfoTable blockData = data; } else { // case 2.2 the block already has some full chunks diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index edbff14aca8..1685d1c5fe2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -78,18 +78,18 @@ public class ContainerReader implements Runnable { private final ConfigurationSource config; private final File hddsVolumeDir; private final MutableVolumeSet volumeSet; - private final boolean shouldDeleteRecovering; + private final boolean shouldDelete; public ContainerReader( MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset, - ConfigurationSource conf, boolean shouldDeleteRecovering) { + ConfigurationSource conf, boolean shouldDelete) { Preconditions.checkNotNull(volume); this.hddsVolume = volume; this.hddsVolumeDir = hddsVolume.getHddsRootDir(); this.containerSet = cset; this.config = conf; this.volumeSet = volSet; - this.shouldDeleteRecovering = shouldDeleteRecovering; + this.shouldDelete = shouldDelete; } @Override @@ -148,7 +148,7 @@ public void readVolume(File hddsVolumeRootDir) { LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir); File currentDir = new File(idDir, Storage.STORAGE_DIR_CURRENT); File[] containerTopDirs = currentDir.listFiles(); - if (containerTopDirs != null) { + if (containerTopDirs != null && containerTopDirs.length > 0) { for (File containerTopDir : containerTopDirs) { if (containerTopDir.isDirectory()) { File[] containerDirs = containerTopDir.listFiles(); @@ -214,7 +214,7 @@ public void verifyAndFixupContainerData(ContainerData containerData) KeyValueContainer kvContainer = new KeyValueContainer(kvContainerData, config); if (kvContainer.getContainerState() == RECOVERING) { - if (shouldDeleteRecovering) { + if (shouldDelete) { kvContainer.markContainerUnhealthy(); LOG.info("Stale recovering container {} marked UNHEALTHY", kvContainerData.getContainerID()); @@ -223,7 +223,9 @@ public void verifyAndFixupContainerData(ContainerData containerData) return; } if (kvContainer.getContainerState() == DELETED) { - cleanupContainer(hddsVolume, kvContainer); + if (shouldDelete) { + cleanupContainer(hddsVolume, kvContainer); + } return; } try { @@ -232,8 +234,10 @@ public void verifyAndFixupContainerData(ContainerData containerData) if (e.getResult() != ContainerProtos.Result.CONTAINER_EXISTS) { throw e; } - resolveDuplicate((KeyValueContainer) containerSet.getContainer( - kvContainer.getContainerData().getContainerID()), kvContainer); + if (shouldDelete) { + resolveDuplicate((KeyValueContainer) containerSet.getContainer( + kvContainer.getContainerData().getContainerID()), kvContainer); + } } } else { throw new StorageContainerException("Container File is corrupted. 
" + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index f050c96a459..aef3965dcd4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -169,7 +169,6 @@ public OzoneContainer( containerSet = new ContainerSet(recoveringContainerTimeout); metadataScanner = null; - buildContainerSet(); metrics = ContainerMetrics.create(conf); handlers = Maps.newHashMap(); @@ -286,9 +285,10 @@ public GrpcTlsConfig getTlsClientConfig() { } /** - * Build's container map. + * Build's container map after volume format. */ - private void buildContainerSet() { + @VisibleForTesting + public void buildContainerSet() { Iterator volumeSetIterator = volumeSet.getVolumesList() .iterator(); ArrayList volumeThreads = new ArrayList<>(); @@ -442,6 +442,8 @@ public void start(String clusterId) throws IOException { return; } + buildContainerSet(); + // Start background volume checks, which will begin after the configured // delay. volumeChecker.start(); @@ -584,4 +586,8 @@ public BlockDeletingService getBlockDeletingService() { return blockDeletingService; } + public ReplicationServer getReplicationServer() { + return replicationServer; + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 82aa975066c..1929c16089b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -41,7 +42,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,10 +65,12 @@ public class ContainerImporter { private final Set importContainerProgress = Collections.synchronizedSet(new HashSet<>()); - public ContainerImporter(@NotNull ConfigurationSource conf, - @NotNull ContainerSet containerSet, - @NotNull ContainerController controller, - @NotNull MutableVolumeSet volumeSet) { + private final ConfigurationSource conf; + + public ContainerImporter(@Nonnull ConfigurationSource conf, + @Nonnull ContainerSet containerSet, + @Nonnull ContainerController controller, + @Nonnull MutableVolumeSet volumeSet) { this.containerSet = containerSet; this.controller = controller; this.volumeSet = volumeSet; @@ -79,6 +82,7 @@ 
public ContainerImporter(@NotNull ConfigurationSource conf, containerSize = (long) conf.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); + this.conf = conf; } public boolean isAllowedContainerImport(long containerID) { @@ -112,14 +116,14 @@ public void importContainer(long containerID, Path tarFilePath, } KeyValueContainerData containerData; - TarContainerPacker packer = new TarContainerPacker(compression); + TarContainerPacker packer = getPacker(compression); try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) { byte[] containerDescriptorYaml = packer.unpackContainerDescriptor(input); - containerData = (KeyValueContainerData) ContainerDataYaml - .readContainer(containerDescriptorYaml); + containerData = getKeyValueContainerData(containerDescriptorYaml); } + ContainerUtils.verifyChecksum(containerData, conf); containerData.setVolume(targetVolume); try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) { @@ -154,4 +158,19 @@ public static Path getUntarDirectory(HddsVolume hddsVolume) return Paths.get(hddsVolume.getVolumeRootDir()) .resolve(CONTAINER_COPY_TMP_DIR).resolve(CONTAINER_COPY_DIR); } + + protected KeyValueContainerData getKeyValueContainerData( + byte[] containerDescriptorYaml) throws IOException { + return (KeyValueContainerData) ContainerDataYaml + .readContainer(containerDescriptorYaml); + } + + protected Set getImportContainerProgress() { + return this.importContainerProgress; + } + + protected TarContainerPacker getPacker(CopyContainerCompression compression) { + return new TarContainerPacker(compression); + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java index 2d0955d47e5..8506364f983 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java @@ -24,7 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.OutputStream; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index 3feb5747486..d2407a61d0b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -23,6 +23,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; @@ -154,6 +155,31 @@ public int getPort() { return port; } + public void setPoolSize(int size) { + if (size <= 0) { + throw new IllegalArgumentException("Pool size must be positive."); + } + + int currentCorePoolSize = executor.getCorePoolSize(); + + // In ThreadPoolExecutor, 
maximumPoolSize must always be greater than or + // equal to the corePoolSize. We must make sure this invariant holds when + // changing the pool size. Therefore, we take into account whether the + // new size is greater or smaller than the current core pool size. + if (size > currentCorePoolSize) { + executor.setMaximumPoolSize(size); + executor.setCorePoolSize(size); + } else { + executor.setCorePoolSize(size); + executor.setMaximumPoolSize(size); + } + } + + @VisibleForTesting + public ThreadPoolExecutor getExecutor() { + return executor; + } + /** * Replication-related configuration. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java index e8d80e5dc92..fd8cf05b294 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java @@ -72,24 +72,20 @@ RegisteredCommand register(DatanodeDetails datanodeDetails, * TODO: Cleanup and update tests, HDDS-9642. * * @param datanodeDetails - Datanode ID. - * @param layoutVersionInfo - Layout Version Proto. * @return Commands to be sent to the datanode. */ - default List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutVersionInfo) { - return processHeartbeat(datanodeDetails, layoutVersionInfo, null); + default List processHeartbeat(DatanodeDetails datanodeDetails) { + return processHeartbeat(datanodeDetails, null); }; /** * Send heartbeat to indicate the datanode is alive and doing well. * @param datanodeDetails - Datanode ID. - * @param layoutVersionInfo - Layout Version Proto. * @param queueReport - The CommandQueueReportProto report from the * heartbeating datanode. * @return Commands to be sent to the datanode. */ List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutVersionInfo, CommandQueueReportProto queueReport); /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java index 8d3de5218af..f73f14f0c27 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.datanode.metadata; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; @@ -27,13 +26,13 @@ import org.apache.hadoop.hdds.security.x509.crl.CRLInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; import org.bouncycastle.asn1.x509.CRLReason; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.cert.X509v2CRLBuilder; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.security.KeyPair; @@ -47,6 +46,7 @@ * Test class for {@link DatanodeCRLStoreImpl}. 
*/ public class TestDatanodeCRLStoreImpl { + @TempDir private File testDir; private OzoneConfiguration conf; private DatanodeCRLStore dnCRLStore; @@ -56,7 +56,6 @@ public class TestDatanodeCRLStoreImpl { @BeforeEach public void setUp() throws Exception { - testDir = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); dnCRLStore = new DatanodeCRLStoreImpl(conf); @@ -71,7 +70,6 @@ public void destroyDbStore() throws Exception { if (dnCRLStore.getStore() != null) { dnCRLStore.getStore().close(); } - FileUtil.fullyDelete(testDir); } @Test public void testCRLStore() throws Exception { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java index 8a3921d7953..cc88940611a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,11 +38,8 @@ import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ServicePlugin; -import org.junit.jupiter.api.AfterEach; - import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; @@ -57,6 +53,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; @@ -68,6 +65,7 @@ public class TestHddsDatanodeService { + @TempDir private File testDir; private static final Logger LOG = LoggerFactory.getLogger(TestHddsDatanodeService.class); @@ -92,7 +90,6 @@ public void setUp() throws IOException { conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, serverAddresses.toArray(new String[0])); - testDir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); conf.set(OZONE_SCM_NAMES, "localhost"); conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class, @@ -109,11 +106,6 @@ public void setUp() throws IOException { conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir); } - @AfterEach - public void tearDown() { - FileUtil.fullyDelete(testDir); - } - @Test public void testStartup() { service.start(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index 95d136cdfca..10d2bc91a71 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -29,7 +29,6 @@ import java.util.List; import java.util.concurrent.Callable; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -59,23 +58,24 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import org.apache.ozone.test.tag.Flaky; import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * Test class for {@link HddsDatanodeService}. */ public class TestHddsSecureDatanodeInit { + @TempDir private static File testDir; private static OzoneConfiguration conf; private static HddsDatanodeService service; @@ -96,7 +96,6 @@ public class TestHddsSecureDatanodeInit { @BeforeAll public static void setUp() throws Exception { - testDir = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); @@ -143,11 +142,6 @@ SCMSecurityProtocolClientSideTranslatorPB createScmSecurityClient() scmClient = mock(SCMSecurityProtocolClientSideTranslatorPB.class); } - @AfterAll - public static void tearDown() { - FileUtil.fullyDelete(testDir); - } - @BeforeEach public void setUpDNCertClient() throws IOException { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index e04d8f00493..33bc4a85166 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -73,7 +73,7 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index a7f6f537048..7917a4ce55c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -22,7 +22,6 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -40,12 +39,9 @@ 
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; -import org.apache.ozone.test.GenericTestUtils; import com.google.protobuf.BlockingService; -import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; -import static org.apache.logging.log4j.util.StackLocatorUtil.getCallerClass; import static org.mockito.Mockito.mock; /** @@ -125,15 +121,6 @@ public static InetSocketAddress getReuseableAddress() throws IOException { } } - public static OzoneConfiguration getConf() { - String name = getCallerClass(2).getSimpleName() - + "-" + randomAlphanumeric(10); - File testDir = GenericTestUtils.getTestDir(name); - Runtime.getRuntime().addShutdownHook(new Thread( - () -> FileUtils.deleteQuietly(testDir))); - return getConf(testDir); - } - public static OzoneConfiguration getConf(File testDir) { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index b408ec201de..bc56141fb08 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -19,7 +19,6 @@ import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -76,6 +75,7 @@ import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.LoggerFactory; import java.io.File; @@ -111,7 +111,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -121,6 +121,7 @@ @Timeout(30) public class TestBlockDeletingService { + @TempDir private File testRoot; private String scmId; private String datanodeUuid; @@ -134,12 +135,6 @@ public class TestBlockDeletingService { @BeforeEach public void init() throws IOException { CodecBuffer.enableLeakDetection(); - - testRoot = GenericTestUtils - .getTestDir(TestBlockDeletingService.class.getSimpleName()); - if (testRoot.exists()) { - FileUtils.cleanDirectory(testRoot); - } scmId = UUID.randomUUID().toString(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath()); @@ -152,7 +147,6 @@ public void init() throws IOException { @AfterEach public void cleanup() throws IOException { BlockUtils.shutdownCache(conf); - FileUtils.deleteDirectory(testRoot); CodecBuffer.assertNoLeaks(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java index 208f521ec36..2381209bb6b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -145,7 +144,7 @@ public void testContainerCacheEviction() throws Exception { } @Test - public void testConcurrentDBGet() throws Exception { + void testConcurrentDBGet() throws Exception { File root = new File(testRoot); root.mkdirs(); root.deleteOnExit(); @@ -172,11 +171,7 @@ public void testConcurrentDBGet() throws Exception { futureList.add(executorService.submit(task)); futureList.add(executorService.submit(task)); for (Future future: futureList) { - try { - future.get(); - } catch (InterruptedException | ExecutionException e) { - fail("Should get the DB instance"); - } + future.get(); } ReferenceCountedDB db = cache.getDB(1, "RocksDB", diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index a7291e9018f..5738f5c1106 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -28,8 +28,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -52,10 +50,10 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -70,17 +68,18 @@ public class TestDatanodeStateMachine { LoggerFactory.getLogger(TestDatanodeStateMachine.class); // Changing it to 1, as current code checks for multiple scm directories, // and fail if exists - private final int scmServerCount = 1; + private static final int SCM_SERVER_COUNT = 1; private List serverAddresses; private List scmServers; private List mockServers; private ExecutorService executorService; private OzoneConfiguration conf; + @TempDir private File testRoot; @BeforeEach - public void setUp() throws Exception { - conf = SCMTestUtils.getConf(); + void setUp() throws Exception { + conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); @@ -92,7 +91,7 @@ public void setUp() throws 
Exception { serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); - for (int x = 0; x < scmServerCount; x++) { + for (int x = 0; x < SCM_SERVER_COUNT; x++) { int port = SCMTestUtils.getReuseableAddress().getPort(); String address = "127.0.0.1"; serverAddresses.add(address + ":" + port); @@ -105,22 +104,6 @@ public void setUp() throws Exception { conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, serverAddresses.toArray(new String[0])); - String path = GenericTestUtils - .getTempPath(TestDatanodeStateMachine.class.getSimpleName()); - testRoot = new File(path); - if (!testRoot.mkdirs()) { - LOG.info("Required directories {} already exist.", testRoot); - } - - File dataDir = new File(testRoot, "data"); - conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath()); - if (!dataDir.mkdirs()) { - LOG.info("Data dir create failed."); - } - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - new File(testRoot, "scm").getAbsolutePath()); - path = new File(testRoot, "datanodeID").getAbsolutePath(); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path); executorService = HadoopExecutors.newCachedThreadPool( new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("TestDataNodeStateMachineThread-%d").build()); @@ -149,8 +132,6 @@ public void tearDown() throws Exception { } } catch (Exception e) { //ignore all exception from the shutdown - } finally { - FileUtil.fullyDelete(testRoot); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java index 32b1fc284bf..e00df0579d3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java @@ -31,7 +31,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.fail; /** * Test DatanodeStoreCache. 
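The TestDatanodeStateMachine and TestBlockDeletingService hunks above follow the same pattern: hand-managed temp directories (GenericTestUtils paths created in setup and deleted via FileUtil or shutdown hooks) are replaced by JUnit 5's @TempDir injection. A minimal sketch of the idiom, with an illustrative test class that is not part of the Ozone code base:

import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import static org.junit.jupiter.api.Assertions.assertTrue;

// Illustrative only; names are made up for the sketch.
class TempDirIdiomTest {

  // JUnit creates a fresh directory before each test and deletes it afterwards,
  // so the manual mkdirs()/fullyDelete() bookkeeping in setUp/tearDown goes away.
  @TempDir
  File testRoot;

  @Test
  void configurationPointsAtManagedDirectory(@TempDir Path perTestDir) {
    // Field and parameter injection can be mixed; both locations are owned by
    // the JUnit lifecycle, so no @AfterEach cleanup is needed.
    File dataDir = new File(testRoot, "data");
    assertTrue(dataDir.mkdirs());
    assertTrue(perTestDir.toFile().isDirectory());
  }
}

This is also why SCMTestUtils drops its no-argument getConf() overload earlier in the diff: each test now owns its directory through @TempDir and passes it to getConf(File).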
@@ -43,7 +42,7 @@ public class TestDatanodeStoreCache { private OzoneConfiguration conf = new OzoneConfiguration(); @Test - public void testBasicOperations() throws IOException { + void testBasicOperations() throws IOException { DatanodeStoreCache cache = DatanodeStoreCache.getInstance(); String dbPath1 = Files.createDirectory(folder.resolve("basic1")) .toFile().toString(); @@ -71,11 +70,7 @@ public void testBasicOperations() throws IOException { assertEquals(1, cache.size()); // test remove non-exist - try { - cache.removeDB(dbPath1); - } catch (Exception e) { - fail("Should not throw " + e); - } + cache.removeDB(dbPath1); // test shutdown cache.shutdownCache(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index 2465b03a68b..2235b23ce88 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -72,8 +72,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -370,13 +369,8 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion) for (Table.KeyValue chunkListKV: deletedBlocks) { preUpgradeBlocks.add(chunkListKV.getKey()); - try { - chunkListKV.getValue(); - fail("No exception thrown when trying to retrieve old " + - "deleted blocks values as chunk lists."); - } catch (IOException ex) { - // Exception thrown as expected. 
- } + assertThrows(IOException.class, () -> chunkListKV.getValue(), + "No exception thrown when trying to retrieve old deleted blocks values as chunk lists."); } assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index a828c1e6924..0c4612b79fa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -75,7 +75,7 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java index 84dfa3b2464..644ee014e9f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java @@ -57,8 +57,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index 4ccfb2e35de..ec78398824e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -43,7 +43,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests create/read .container files. 
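The TestDatanodeStoreCache and TestSchemaOneBackwardsCompatibility hunks above replace try/fail/catch blocks with either a plain call (when no exception is expected) or JUnit 5's assertThrows, which fails the test if no exception of the given type is raised and returns the exception for further assertions. A small sketch of the assertThrows idiom, where readValue() is a made-up stand-in for a call that is expected to fail:

import java.io.IOException;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

// Illustrative only; not part of the Ozone code base.
class AssertThrowsIdiomTest {

  private String readValue() throws IOException {
    throw new IOException("unreadable pre-upgrade value");
  }

  @Test
  void oldValueCannotBeRead() {
    // Replaces: try { readValue(); fail("..."); } catch (IOException expected) { }
    IOException ex = assertThrows(IOException.class, this::readValue,
        "reading the pre-upgrade value should fail");
    assertEquals("unreadable pre-upgrade value", ex.getMessage());
  }
}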
@@ -205,41 +204,35 @@ public void testIncorrectContainerFile(ContainerLayoutVersion layout) { @ContainerLayoutTestInfo.ContainerTest - public void testCheckBackWardCompatibilityOfContainerFile( - ContainerLayoutVersion layout) { + void testCheckBackWardCompatibilityOfContainerFile( + ContainerLayoutVersion layout) throws Exception { setLayoutVersion(layout); // This test is for if we upgrade, and then .container files added by new // server will have new fields added to .container file, after a while we // decided to rollback. Then older ozone can read .container files // created or not. - try { - String containerFile = "additionalfields.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - ContainerUtils.verifyChecksum(kvData, conf); + String containerFile = "additionalfields.container"; + //Get file from resources folder + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource(containerFile).getFile()); + KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml + .readContainerFile(file); + ContainerUtils.verifyChecksum(kvData, conf); - //Checking the Container file data is consistent or not - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData - .getState()); - assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals(9223372036854775807L, kvData.getContainerID()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getChunksPath()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getMetadataPath()); - assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion()); - assertEquals(2, kvData.getMetadata().size()); - - } catch (Exception ex) { - ex.printStackTrace(); - fail("testCheckBackWardCompatibilityOfContainerFile failed"); - } + //Checking the Container file data is consistent or not + assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData + .getState()); + assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType()); + assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData + .getContainerType()); + assertEquals(9223372036854775807L, kvData.getContainerID()); + assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData + .getChunksPath()); + assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData + .getMetadataPath()); + assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion()); + assertEquals(2, kvData.getMetadata().size()); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index cd5f6c0f9b6..890bca18cb1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -40,8 +40,8 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import 
org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -54,6 +54,8 @@ * The class for testing container deletion choosing policy. */ public class TestContainerDeletionChoosingPolicy { + @TempDir + private File tempFile; private String path; private OzoneContainer ozoneContainer; private ContainerSet containerSet; @@ -63,23 +65,15 @@ public class TestContainerDeletionChoosingPolicy { private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0; private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000; - private ContainerLayoutVersion layoutVersion; - - public void setLayoutVersion(ContainerLayoutVersion layout) { - this.layoutVersion = layout; - } - @BeforeEach public void init() throws Throwable { conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName()); + path = tempFile.getPath(); } @ContainerLayoutTestInfo.ContainerTest public void testRandomChoosingPolicy(ContainerLayoutVersion layout) throws IOException { - setLayoutVersion(layout); File containerDir = new File(path); if (containerDir.exists()) { FileUtils.deleteDirectory(new File(path)); @@ -143,7 +137,6 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) @ContainerLayoutTestInfo.ContainerTest public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) throws IOException { - setLayoutVersion(layout); File containerDir = new File(path); if (containerDir.exists()) { FileUtils.deleteDirectory(new File(path)); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index fcc48fef1bd..3ff8f9e625d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -73,7 +73,6 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.apache.ozone.test.GenericTestUtils; import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; @@ -82,6 +81,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +98,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeFalse; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -113,6 +112,8 @@ public class TestContainerPersistence { private static final String SCM_ID = UUID.randomUUID().toString(); private static final Logger LOGGER = 
LoggerFactory.getLogger(TestContainerPersistence.class); + @TempDir + private static File hddsFile; private static String hddsPath; private static OzoneConfiguration conf; private static VolumeChoosingPolicy volumeChoosingPolicy; @@ -138,8 +139,7 @@ private void initSchemaAndVersionInfo(ContainerTestVersionInfo versionInfo) { @BeforeAll public static void init() { conf = new OzoneConfiguration(); - hddsPath = GenericTestUtils - .getTempPath(TestContainerPersistence.class.getSimpleName()); + hddsPath = hddsFile.getPath(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, hddsPath); volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); @@ -248,12 +248,8 @@ public void testCreateDuplicateContainer(ContainerTestVersionInfo versionInfo) long testContainerID = getTestContainerID(); Container container = addContainer(containerSet, testContainerID); - try { - containerSet.addContainer(container); - fail("Expected Exception not thrown."); - } catch (IOException ex) { - assertNotNull(ex); - } + IOException ex = assertThrows(IOException.class, () -> containerSet.addContainer(container)); + assertNotNull(ex); } @ContainerTestVersionInfo.ContainerTest @@ -544,7 +540,7 @@ public void testGetContainerReports(ContainerTestVersionInfo versionInfo) long actualContainerID = report.getContainerID(); assertTrue(containerIDs.remove(actualContainerID)); } - assertTrue(containerIDs.isEmpty()); + assertThat(containerIDs).isEmpty(); } /** @@ -585,7 +581,7 @@ public void testListContainer(ContainerTestVersionInfo versionInfo) } // Assert that we listed all the keys that we had put into // container. - assertTrue(testMap.isEmpty()); + assertThat(testMap).isEmpty(); } private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException { @@ -799,26 +795,23 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo) blockData.setBlockCommitSequenceId(4); blockManager.putBlock(container, blockData); BlockData readBlockData; - try { + StorageContainerException sce = assertThrows(StorageContainerException.class, () -> { blockID1.setBlockCommitSequenceId(5); // read with bcsId higher than container bcsId blockManager. getBlock(container, blockID1); - fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - assertSame(UNKNOWN_BCSID, sce.getResult()); - } + }); + assertSame(UNKNOWN_BCSID, sce.getResult()); - try { + sce = assertThrows(StorageContainerException.class, () -> { blockID1.setBlockCommitSequenceId(4); // read with bcsId lower than container bcsId but greater than committed // bcsId. blockManager. getBlock(container, blockID1); - fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - assertSame(BCSID_MISMATCH, sce.getResult()); - } + }); + assertSame(BCSID_MISMATCH, sce.getResult()); + readBlockData = blockManager. 
getBlock(container, blockData.getBlockID()); ChunkInfo readChunk = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index bd035632403..95df6c647f8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -20,7 +20,6 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -63,6 +62,7 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -87,7 +87,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -101,6 +101,8 @@ public class TestHddsDispatcher { private static final Logger LOG = LoggerFactory.getLogger( TestHddsDispatcher.class); + @TempDir + private File testDir; public static final IncrementalReportSender NO_OP_ICR_SENDER = c -> { @@ -110,11 +112,10 @@ public class TestHddsDispatcher { public void testContainerCloseActionWhenFull( ContainerLayoutVersion layout) throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); @@ -160,22 +161,20 @@ public void testContainerCloseActionWhenFull( } finally { volumeSet.shutdown(); ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenVolumeFull( ContainerLayoutVersion layoutVersion) throws Exception { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); OzoneConfiguration conf = new OzoneConfiguration(); conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 100.0, StorageUnit.BYTES); DatanodeDetails dd = randomDatanodeDetails(); HddsVolume.Builder volumeBuilder = - new HddsVolume.Builder(testDir).datanodeUuid(dd.getUuidString()) + new HddsVolume.Builder(testDirPath).datanodeUuid(dd.getUuidString()) .conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE); // state of cluster : available (140) > 100 ,datanode volume // utilisation threshold not yet reached. 
container creates are successful. @@ -237,19 +236,17 @@ public void testContainerCloseActionWhenVolumeFull( } finally { volumeSet.shutdown(); ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @Test public void testCreateContainerWithWriteChunk() throws IOException { - String testDir = - GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); try { UUID scmId = UUID.randomUUID(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); ContainerCommandRequestProto writeChunkRequest = @@ -292,19 +289,17 @@ public void testCreateContainerWithWriteChunk() throws IOException { } } finally { ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @Test public void testContainerNotFoundWithCommitChunk() throws IOException { - String testDir = - GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); try { UUID scmId = UUID.randomUUID(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); ContainerCommandRequestProto writeChunkRequest = @@ -329,19 +324,17 @@ public void testContainerNotFoundWithCommitChunk() throws IOException { + " does not exist"); } finally { ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @Test public void testWriteChunkWithCreateContainerFailure() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); try { UUID scmId = UUID.randomUUID(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( @@ -366,19 +359,17 @@ public void testWriteChunkWithCreateContainerFailure() throws IOException { + " creation failed , Result: DISK_OUT_OF_SPACE"); } finally { ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @Test public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); + String testDirPath = testDir.getPath(); try { UUID scmId = UUID.randomUUID(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); DatanodeDetails dd = randomDatanodeDetails(); HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); 
ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( @@ -426,7 +417,6 @@ public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException { } } finally { ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } @@ -548,11 +538,10 @@ private ContainerCommandRequestProto getReadChunkRequest( @Test public void testValidateToken() throws Exception { - final String testDir = GenericTestUtils.getRandomizedTempPath(); try { final OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir); + conf.set(HDDS_DATANODE_DIR_KEY, testDir.getPath()); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); final DatanodeDetails dd = randomDatanodeDetails(); final UUID scmId = UUID.randomUUID(); @@ -611,7 +600,6 @@ public void verify(String user, Token token, } } finally { ContainerMetrics.remove(); - FileUtils.deleteDirectory(new File(testDir)); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java index a4568414fc9..43aadc37c04 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java @@ -24,8 +24,8 @@ import org.junit.jupiter.api.Test; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index f05a8c6dfe3..219645c8edc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -43,8 +43,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java index d161f5537ae..ac60fba1ae9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java @@ -45,9 +45,9 @@ import java.util.List; import java.util.stream.Collectors; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java index bfce4065d08..4e9005979b9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java @@ -48,7 +48,7 @@ import java.util.Collections; import java.util.List; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 15ac94056b8..90ed4ca4cc9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -73,8 +73,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java index 8b7241f7851..09fa8a99177 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java @@ -25,7 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; -import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java index 9ee0d17dde6..4718df3ae3f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.common.volume; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; @@ -29,8 +28,9 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; -import java.io.File; +import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; @@ -38,7 +38,6 @@ import java.util.Map; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -52,35 +51,34 @@ public class TestCapacityVolumeChoosingPolicy { private final List volumes = new ArrayList<>(); private static final OzoneConfiguration CONF = new OzoneConfiguration(); - private static final String BASE_DIR = - getTestDir(TestCapacityVolumeChoosingPolicy.class.getSimpleName()) - .getAbsolutePath(); - private static final String VOLUME_1 = BASE_DIR + "disk1"; - private static final String VOLUME_2 = BASE_DIR + "disk2"; - private static final String VOLUME_3 = BASE_DIR + "disk3"; + @TempDir + private Path baseDir; @BeforeEach public void setup() throws Exception { + String volume1 = baseDir + "disk1"; + String volume2 = baseDir + "disk2"; + String volume3 = baseDir + "disk3"; policy = new CapacityVolumeChoosingPolicy(); SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100); SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of( source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); - HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1) + HddsVolume vol1 = new HddsVolume.Builder(volume1) .conf(CONF) .usageCheckFactory(factory1) .build(); SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200); SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of( source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); - HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2) + HddsVolume vol2 = new HddsVolume.Builder(volume2) .conf(CONF) .usageCheckFactory(factory2) .build(); SpaceUsageSource source3 = MockSpaceUsageSource.fixed(500, 300); SpaceUsageCheckFactory factory3 = MockSpaceUsageCheckFactory.of( source3, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); - HddsVolume vol3 = new HddsVolume.Builder(VOLUME_3) + HddsVolume vol3 = new HddsVolume.Builder(volume3) .conf(CONF) .usageCheckFactory(factory3) .build(); @@ -94,9 +92,6 @@ public void setup() throws Exception 
{ @AfterEach public void cleanUp() { volumes.forEach(HddsVolume::shutdown); - FileUtil.fullyDelete(new File(VOLUME_1)); - FileUtil.fullyDelete(new File(VOLUME_2)); - FileUtil.fullyDelete(new File(VOLUME_3)); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 72f1d451b52..cc6fe87e19d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -18,12 +18,11 @@ package org.apache.hadoop.ozone.container.common.volume; -import java.io.File; +import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; @@ -35,8 +34,8 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; -import static org.apache.ozone.test.GenericTestUtils.getTestDir; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -50,27 +49,26 @@ public class TestRoundRobinVolumeChoosingPolicy { private final List volumes = new ArrayList<>(); private static final OzoneConfiguration CONF = new OzoneConfiguration(); - private static final String BASE_DIR = - getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName()) - .getAbsolutePath(); - private static final String VOLUME_1 = BASE_DIR + "disk1"; - private static final String VOLUME_2 = BASE_DIR + "disk2"; + @TempDir + private Path baseDir; @BeforeEach public void setup() throws Exception { + String volume1 = baseDir + "disk1"; + String volume2 = baseDir + "disk2"; policy = new RoundRobinVolumeChoosingPolicy(); SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100); SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of( source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); - HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1) + HddsVolume vol1 = new HddsVolume.Builder(volume1) .conf(CONF) .usageCheckFactory(factory1) .build(); SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200); SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of( source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE); - HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2) + HddsVolume vol2 = new HddsVolume.Builder(volume2) .conf(CONF) .usageCheckFactory(factory2) .build(); @@ -83,8 +81,6 @@ public void setup() throws Exception { @AfterEach public void cleanUp() { volumes.forEach(HddsVolume::shutdown); - FileUtil.fullyDelete(new File(VOLUME_1)); - FileUtil.fullyDelete(new File(VOLUME_2)); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 9d90659552e..1159d4277c7 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -20,22 +20,18 @@ import java.io.IOException; import org.apache.commons.io.FileUtils; -import org.junit.jupiter.api.Timeout; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.ozone.container.common.volume.HddsVolume .HDDS_VOLUME_DIR; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -44,9 +40,13 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; +import org.slf4j.LoggerFactory; import java.io.File; import java.lang.reflect.Method; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -59,12 +59,13 @@ public class TestVolumeSet { private OzoneConfiguration conf; private MutableVolumeSet volumeSet; - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - private final List volumes = new ArrayList<>(); - private static final String DUMMY_IP_ADDR = "0.0.0.0"; + @TempDir + private Path baseDir; + + private String volume1; + private String volume2; + private final List volumes = new ArrayList<>(); private void initializeVolumeSet() throws Exception { volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, @@ -74,6 +75,9 @@ private void initializeVolumeSet() throws Exception { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); + volume1 = baseDir.resolve("disk1").toString(); + volume2 = baseDir.resolve("disk2").toString(); + String dataDirKey = volume1 + "," + volume2; volumes.add(volume1); volumes.add(volume2); @@ -94,8 +98,6 @@ public void shutdown() throws IOException { FileUtils.deleteDirectory(volume.getStorageDir()); } volumeSet.shutdown(); - - FileUtil.fullyDelete(new File(baseDir)); } private boolean checkVolumeExistsInVolumeSet(String volumeRoot) { @@ -115,11 +117,11 @@ public void testVolumeSetInitialization() throws Exception { // VolumeSet initialization should add volume1 and volume2 to VolumeSet assertEquals(volumesList.size(), volumes.size(), - "VolumeSet intialization is incorrect"); + "VolumeSet initialization is incorrect"); assertTrue(checkVolumeExistsInVolumeSet(volume1), - "VolumeSet not initailized correctly"); + "VolumeSet not initialized correctly"); assertTrue(checkVolumeExistsInVolumeSet(volume2), - "VolumeSet not initailized correctly"); + "VolumeSet not initialized correctly"); } @Test @@ 
-128,7 +130,7 @@ public void testAddVolume() { assertEquals(2, volumeSet.getVolumesList().size()); // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; + String volume3 = baseDir.resolve("disk3").toString(); boolean success = volumeSet.addVolume(volume3); assertTrue(success); @@ -223,31 +225,21 @@ public void testShutdown() throws Exception { } @Test - public void testFailVolumes() throws Exception { - MutableVolumeSet volSet = null; - File readOnlyVolumePath = new File(baseDir); + void testFailVolumes(@TempDir File readOnlyVolumePath, @TempDir File volumePath) throws Exception { //Set to readonly, so that this volume will be failed - readOnlyVolumePath.setReadOnly(); - File volumePath = GenericTestUtils.getRandomizedTestDir(); + assumeThat(readOnlyVolumePath.setReadOnly()).isTrue(); OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath() + "," + volumePath.getAbsolutePath()); ozoneConfig.set(HddsConfigKeys.OZONE_METADATA_DIRS, volumePath.getAbsolutePath()); - volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig, + MutableVolumeSet volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig, null, StorageVolume.VolumeType.DATA_VOLUME, null); assertEquals(1, volSet.getFailedVolumesList().size()); assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0) .getStorageDir()); - //Set back to writable - try { - readOnlyVolumePath.setWritable(true); - volSet.shutdown(); - } finally { - FileUtil.fullyDelete(volumePath); - } - + volSet.shutdown(); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 27e1195a24b..e3c610bfe47 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; -import org.apache.hadoop.ozone.container.common.TestDatanodeStateMachine; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -49,7 +48,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Timer; @@ -59,9 +57,11 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -78,6 +78,8 @@ public class 
TestVolumeSetDiskChecks { public static final Logger LOG = LoggerFactory.getLogger( TestVolumeSetDiskChecks.class); + @TempDir + private File dir; private OzoneConfiguration conf = null; @@ -217,21 +219,21 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { final OzoneConfiguration ozoneConf = new OzoneConfiguration(); final List dirs = new ArrayList<>(); for (int i = 0; i < numDirs; ++i) { - dirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); + dirs.add(new File(dir, randomAlphanumeric(10)).toString()); } ozoneConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs)); final List metaDirs = new ArrayList<>(); for (int i = 0; i < numDirs; ++i) { - metaDirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); + metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); for (int i = 0; i < numDirs; ++i) { - dbDirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); + dbDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } ozoneConf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR, String.join(",", dbDirs)); @@ -264,8 +266,7 @@ public void testVolumeFailure() throws IOException { ContainerSet conSet = new ContainerSet(20); when(ozoneContainer.getContainerSet()).thenReturn(conSet); - String path = GenericTestUtils - .getTempPath(TestDatanodeStateMachine.class.getSimpleName()); + String path = dir.getPath(); File testRoot = new File(path); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 52316c43264..49ddd5f674d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -28,7 +28,6 @@ import java.util.UUID; import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -46,7 +45,6 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.ozone.test.GenericTestUtils; import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; @@ -60,6 +58,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -75,6 +74,7 @@ public class TestKeyValueBlockIterator { private KeyValueContainerData containerData; private MutableVolumeSet volumeSet; private OzoneConfiguration conf; + @TempDir private File testRoot; private DBHandle db; private ContainerLayoutVersion layout; @@ -110,7 +110,6 @@ private static List provideTestData() { } public void setup() 
throws Exception { - testRoot = GenericTestUtils.getRandomizedTestDir(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath()); volumeSet = new MutableVolumeSet(datanodeID, clusterID, conf, null, @@ -135,7 +134,6 @@ public void tearDown() throws Exception { db.cleanup(); BlockUtils.shutdownCache(conf); volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); } @ParameterizedTest diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index d340ffe77c4..15d0faefdf9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -51,10 +51,8 @@ import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.replication.CopyContainerCompression; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker; -import org.assertj.core.api.Fail; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -98,8 +96,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.when; @@ -110,7 +108,7 @@ public class TestKeyValueContainer { @TempDir - private Path folder; + private File folder; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; @@ -229,7 +227,7 @@ public void testNextVolumeTriedOnWriteFailure( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); String volumeDirPath = - Files.createDirectory(folder.resolve("volumeDir")) + Files.createDirectory(folder.toPath().resolve("volumeDir")) .toFile().getAbsolutePath(); HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath) .conf(CONF).datanodeUuid(datanodeId.toString()).build(); @@ -276,7 +274,7 @@ public void testEmptyContainerImportExport( //destination path File exportTar = Files.createFile( - folder.resolve("export.tar")).toFile(); + folder.toPath().resolve("export.tar")).toFile(); TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); //export the container try (FileOutputStream fos = new FileOutputStream(exportTar)) { @@ -309,7 +307,7 @@ public void testUnhealthyContainerImportExport( keyValueContainer.update(data.getMetadata(), true); //destination path - File exportTar = Files.createFile(folder.resolve("export.tar")).toFile(); + File exportTar = Files.createFile(folder.toPath().resolve("export.tar")).toFile(); TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); //export the container try (FileOutputStream fos = new FileOutputStream(exportTar)) { @@ -347,7 +345,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo) //destination path File folderToExport = 
Files.createFile( - folder.resolve("export.tar")).toFile(); + folder.toPath().resolve("export.tar")).toFile(); for (CopyContainerCompression compr : CopyContainerCompression.values()) { TarContainerPacker packer = new TarContainerPacker(compr); @@ -394,15 +392,14 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo) containerData.getBytesUsed()); //Can't overwrite existing container - try { + KeyValueContainer finalContainer = container; + assertThrows(IOException.class, () -> { try (FileInputStream fis = new FileInputStream(folderToExport)) { - container.importContainerData(fis, packer); + finalContainer.importContainerData(fis, packer); } - fail("Container is imported twice. Previous files are overwritten"); - } catch (IOException ex) { - //all good - assertTrue(container.getContainerFile().exists()); - } + }, "Container is imported twice. Previous files are overwritten"); + //all good + assertTrue(container.getContainerFile().exists()); //Import failure should cleanup the container directory containerData = @@ -416,18 +413,18 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo) containerVolume = volumeChoosingPolicy.chooseVolume( StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()), 1); container.populatePathFields(scmId, containerVolume); - try { - FileInputStream fis = new FileInputStream(folderToExport); - fis.close(); - container.importContainerData(fis, packer); - fail("Container import should fail"); - } catch (Exception ex) { - assertInstanceOf(IOException.class, ex); - } finally { - File directory = - new File(container.getContainerData().getContainerPath()); - assertFalse(directory.exists()); - } + KeyValueContainer finalContainer1 = container; + assertThrows(IOException.class, () -> { + try { + FileInputStream fis = new FileInputStream(folderToExport); + fis.close(); + finalContainer1.importContainerData(fis, packer); + } finally { + File directory = + new File(finalContainer1.getContainerData().getContainerPath()); + assertFalse(directory.exists()); + } + }); } } @@ -524,7 +521,7 @@ public void concurrentExport(ContainerTestVersionInfo versionInfo) .mapToObj(i -> new Thread(() -> { try { File file = Files.createFile( - folder.resolve("concurrent" + i + ".tar")).toFile(); + folder.toPath().resolve("concurrent" + i + ".tar")).toFile(); try (OutputStream out = Files.newOutputStream(file.toPath())) { keyValueContainer.exportContainerData(out, packer); } @@ -814,13 +811,13 @@ public void testKeyValueDataProtoBufMsg(ContainerTestVersionInfo versionInfo) } @ContainerTestVersionInfo.ContainerTest - public void testAutoCompactionSmallSstFile( + void testAutoCompactionSmallSstFile( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)); // Create a new HDDS volume String volumeDirPath = - Files.createDirectory(folder.resolve("volumeDir")).toFile() + Files.createDirectory(folder.toPath().resolve("volumeDir")).toFile() .getAbsolutePath(); HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath) .conf(CONF).datanodeUuid(datanodeId.toString()).build(); @@ -859,7 +856,7 @@ public void testAutoCompactionSmallSstFile( if (volume == newVolume) { File folderToExport = Files.createFile( - folder.resolve(containerId + "_exported.tar.gz")).toFile(); + folder.toPath().resolve(containerId + "_exported.tar.gz")).toFile(); TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION); //export the container try (FileOutputStream fos = new 
FileOutputStream(folderToExport)) { @@ -905,8 +902,6 @@ public void testAutoCompactionSmallSstFile( List fileMetaDataList2 = ((RDBStore)(dnStore.getStore())).getDb().getLiveFilesMetaData(); assertThat(fileMetaDataList2.size()).isLessThan(fileMetaDataList1.size()); - } catch (Exception e) { - Fail.fail("TestAutoCompactionSmallSstFile failed"); } finally { // clean up for (KeyValueContainer c : containerList) { @@ -929,7 +924,7 @@ public void testIsEmptyContainerStateWhileImport( //destination path File folderToExport = Files.createFile( - folder.resolve("export.tar")).toFile(); + folder.toPath().resolve("export.tar")).toFile(); for (CopyContainerCompression compr : CopyContainerCompression.values()) { TarContainerPacker packer = new TarContainerPacker(compr); @@ -978,7 +973,7 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock( //destination path File folderToExport = Files.createFile( - folder.resolve("export.tar")).toFile(); + folder.toPath().resolve("export.tar")).toFile(); for (CopyContainerCompression compr : CopyContainerCompression.values()) { TarContainerPacker packer = new TarContainerPacker(compr); @@ -1022,14 +1017,8 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock( public void testImportV2ReplicaToV3HddsVolume( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - final String testDir = GenericTestUtils.getTempPath( - TestKeyValueContainer.class.getSimpleName() + "-" - + UUID.randomUUID()); - try { - testMixedSchemaImport(testDir, false); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } + final String testDir = folder.getPath(); + testMixedSchemaImport(testDir, false); } /** @@ -1039,14 +1028,8 @@ public void testImportV2ReplicaToV3HddsVolume( public void testImportV3ReplicaToV2HddsVolume( ContainerTestVersionInfo versionInfo) throws Exception { init(versionInfo); - final String testDir = GenericTestUtils.getTempPath( - TestKeyValueContainer.class.getSimpleName() + "-" - + UUID.randomUUID()); - try { - testMixedSchemaImport(testDir, true); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } + final String testDir = folder.getPath(); + testMixedSchemaImport(testDir, true); } private void testMixedSchemaImport(String dir, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 4145509413d..60dfe8509bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -41,8 +41,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java index 
51ecb322243..9c531069e9c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.keyvalue; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -34,8 +33,8 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +62,7 @@ public class TestKeyValueContainerIntegrityChecks { private ContainerLayoutTestInfo containerLayoutTestInfo; private MutableVolumeSet volumeSet; private OzoneConfiguration conf; + @TempDir private File testRoot; private ChunkManager chunkManager; private String clusterID = UUID.randomUUID().toString(); @@ -87,7 +87,6 @@ void initTestData(ContainerTestVersionInfo versionInfo) throws Exception { private void setup() throws Exception { LOG.info("Testing layout:{}", containerLayoutTestInfo.getLayout()); - this.testRoot = GenericTestUtils.getRandomizedTestDir(); conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath()); containerLayoutTestInfo.updateConfig(conf); @@ -101,7 +100,6 @@ private void setup() throws Exception { public void teardown() { BlockUtils.shutdownCache(conf); volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); } protected ContainerLayoutVersion getChunkLayout() { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java index 0b63ab1796d..61083fa73fa 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java @@ -43,8 +43,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 28c0a8092a0..2637f1922c6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java 
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -62,7 +62,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index 1e326ce3ee0..565c3c94408 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -50,7 +50,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestBlockID; import static org.apache.hadoop.ozone.container.ContainerTestHelper.getWriteChunkRequest; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java index 9ebc55a4b80..c44682203e2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java @@ -382,7 +382,7 @@ private File writeDbFile( private File writeSingleFile(Path parentPath, String fileName, String content) throws IOException { - Path path = parentPath.resolve(fileName); + Path path = parentPath.resolve(fileName).normalize(); Files.createDirectories(path.getParent()); File file = path.toFile(); FileOutputStream fileStream = new FileOutputStream(file); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java index 14b47a57c3a..1a1158a210f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java @@ -70,7 +70,7 @@ class TestChunkUtils { private static final Random RANDOM = new Random(); @TempDir - private Path tempDir; + private File tempDir; static ChunkBuffer readData(File file, long off, long len) throws StorageContainerException { @@ -84,7 +84,7 @@ void concurrentReadOfSameFile() throws Exception { String s = "Hello World"; byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); - Path tempFile = tempDir.resolve("concurrent"); + Path tempFile = tempDir.toPath().resolve("concurrent"); int len = data.limit(); int offset = 
0; File file = tempFile.toFile(); @@ -136,7 +136,7 @@ void concurrentProcessing() throws Exception { 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); AtomicInteger processed = new AtomicInteger(); for (int i = 0; i < threads; i++) { - Path path = tempDir.resolve(String.valueOf(i)); + Path path = tempDir.toPath().resolve(String.valueOf(i)); executor.execute(() -> { try { ChunkUtils.processFileExclusively(path, () -> { @@ -166,7 +166,7 @@ void serialRead() throws IOException { String s = "Hello World"; byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); - Path tempFile = tempDir.resolve("serial"); + Path tempFile = tempDir.toPath().resolve("serial"); File file = tempFile.toFile(); int len = data.limit(); int offset = 0; @@ -185,7 +185,7 @@ void serialRead() throws IOException { @Test void validateChunkForOverwrite() throws IOException { - Path tempFile = tempDir.resolve("overwrite"); + Path tempFile = tempDir.toPath().resolve("overwrite"); FileUtils.write(tempFile.toFile(), "test", UTF_8); assertTrue( @@ -226,7 +226,7 @@ void readMissingFile() { @Test void testReadData() throws Exception { - final File dir = GenericTestUtils.getTestDir("testReadData"); + final File dir = new File(tempDir, "testReadData"); try { assertTrue(dir.mkdirs()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java index 0ca0d267b88..0c373cb0dbf 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java @@ -44,8 +44,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java index 38a01e46900..26d959e8860 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java @@ -45,6 +45,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; @@ -83,6 +84,7 @@ private void initTest(ContainerTestVersionInfo versionInfo) this.schemaVersion = versionInfo.getSchemaVersion(); this.config = new OzoneConfiguration(); ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, config); + config.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true); 
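
A recurring change in these test hunks is replacing try/fail/catch blocks with JUnit 5's `assertThrows`, which also explains the effectively-final `finalContainer` locals introduced earlier (a lambda may only capture effectively final variables). A minimal sketch of the idiom, with illustrative names only:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import org.junit.jupiter.api.Test;

class AssertThrowsIdiomTest {

  @Test
  void rejectsNegativeInput() {
    // assertThrows returns the thrown exception, so follow-up assertions
    // move after the call instead of living inside a catch block.
    IOException thrown = assertThrows(IOException.class, () -> parse(-1),
        "parse() should reject negative values");
    assertEquals("negative value: -1", thrown.getMessage());
  }

  // Illustrative helper standing in for the production call under test.
  private static int parse(int value) throws IOException {
    if (value < 0) {
      throw new IOException("negative value: " + value);
    }
    return value;
  }
}
```
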
initilaze(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java index 304bfa7f206..36d71655192 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java @@ -37,7 +37,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test for FilePerBlockStrategy. @@ -48,7 +48,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases { public void testDeletePartialChunkWithOffsetUnsupportedRequest() { // GIVEN ChunkManager chunkManager = createTestSubject(); - try { + StorageContainerException e = assertThrows(StorageContainerException.class, () -> { KeyValueContainer container = getKeyValueContainer(); BlockID blockID = getBlockID(); chunkManager.writeChunk(container, blockID, @@ -58,12 +58,8 @@ public void testDeletePartialChunkWithOffsetUnsupportedRequest() { // WHEN chunkManager.deleteChunk(container, blockID, chunkInfo); - - // THEN - fail("testDeleteChunkUnsupportedRequest"); - } catch (StorageContainerException ex) { - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult()); - } + }); + assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, e.getResult()); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java index fb79fafe2ba..e34bfe9396a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java @@ -42,8 +42,8 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.never; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java index 302e2d2d138..9bb3a382c60 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java @@ -42,7 +42,7 @@ import static 
org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.never; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index d48f0d3314e..7f38eab785b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -70,8 +70,8 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -358,7 +358,7 @@ public void testContainerReaderWithInvalidDbPath( hddsVolume1, containerSet1, conf, true); containerReader.readVolume(hddsVolume1.getHddsRootDir()); assertEquals(0, containerSet1.containerCount()); - assertTrue(dnLogs.getOutput().contains("Container DB file is missing")); + assertThat(dnLogs.getOutput()).contains("Container DB file is missing"); } @ContainerTestVersionInfo.ContainerTest @@ -558,8 +558,10 @@ public void testMarkedDeletedContainerCleared( // add db entry for the container ID 101 for V3 baseCount = addDbEntry(containerData); } + + // verify container data and perform cleanup ContainerReader containerReader = new ContainerReader(volumeSet, - hddsVolume, containerSet, conf, false); + hddsVolume, containerSet, conf, true); containerReader.run(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java index 3a3a96cc429..d892e916ce6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java @@ -45,8 +45,8 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.getUnhealthyScanResult; import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.CONTAINER_SCAN_MIN_GAP_DEFAULT; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java index acf04edd643..8334c7b078c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java @@ -46,8 +46,8 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.never; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index af6bd5d17f3..497418dcdcb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -153,6 +153,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) OzoneContainer ozoneContainer = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); + ozoneContainer.buildContainerSet(); ContainerSet containerset = ozoneContainer.getContainerSet(); assertEquals(numTestContainers, containerset.containerCount()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java index a47dffd6d38..231a90eb0bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java @@ -36,7 +36,7 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index d9ff2364f32..1b989e6bc7f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -21,7 +21,9 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.util.HashSet; import java.util.concurrent.CompletableFuture; import 
java.util.concurrent.Semaphore; @@ -41,8 +43,10 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -52,7 +56,10 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; /** @@ -138,6 +145,42 @@ void importSameContainerWhenFirstInProgress() throws Exception { semaphore.release(); } + @Test + public void testInconsistentChecksumContainerShouldThrowError() throws Exception { + // create container + long containerId = 1; + KeyValueContainerData containerData = spy(new KeyValueContainerData(containerId, + ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test")); + // mock to return different checksum + when(containerData.getChecksum()).thenReturn("checksum1", "checksum2"); + doNothing().when(containerData).setChecksumTo0ByteArray(); + // create containerImporter object + ContainerController controllerMock = mock(ContainerController.class); + ContainerSet containerSet = new ContainerSet(0); + MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, + StorageVolume.VolumeType.DATA_VOLUME, null); + ContainerImporter containerImporter = spy(new ContainerImporter(conf, + containerSet, controllerMock, volumeSet)); + + TarContainerPacker packer = mock(TarContainerPacker.class); + when(packer.unpackContainerDescriptor(any())).thenReturn("test".getBytes( + StandardCharsets.UTF_8)); + when(containerImporter.getPacker(any())).thenReturn(packer); + + doReturn(containerData).when(containerImporter).getKeyValueContainerData(any(byte[].class)); + when(containerImporter.getImportContainerProgress()).thenReturn(new HashSet<>()); + + File tarFile = File.createTempFile("temp_" + System + .currentTimeMillis(), ".tar"); + + StorageContainerException scException = + assertThrows(StorageContainerException.class, + () -> containerImporter.importContainer(containerId, + tarFile.toPath(), null, NO_COMPRESSION)); + Assertions.assertTrue(scException.getMessage(). 
+ contains("Container checksum error")); + } + private File containerTarFile( long containerId, ContainerData containerData) throws IOException { File yamlFile = new File(tempDir, "container.yaml"); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java index fb6b8ffa357..2208a4536f8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java @@ -35,7 +35,7 @@ import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java index b678f6e4042..91fd29681cc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java @@ -39,8 +39,8 @@ import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 833b8de4e3f..26c6853b64a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.replication; +import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; @@ -65,8 +66,9 @@ import org.apache.ozone.test.TestClock; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static java.util.Collections.emptyList; @@ -81,9 +83,9 @@ import static org.junit.jupiter.api.Assertions.fail; import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -276,7 +278,8 @@ public void slowDownload() { } @ContainerLayoutTestInfo.ContainerTest - public void testDownloadAndImportReplicatorFailure() throws IOException { + public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout, + @TempDir File tempFile) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() @@ -294,9 +297,7 @@ public void testDownloadAndImportReplicatorFailure() throws IOException { any(Path.class), any())) .thenReturn(res); - final String testDir = GenericTestUtils.getTempPath( - TestReplicationSupervisor.class.getSimpleName() + - "-" + UUID.randomUUID()); + final String testDir = tempFile.getPath(); MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); when(volumeSet.getVolumesList()) .thenReturn(singletonList( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index 160c690bd4a..f054358b35b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -35,7 +35,7 @@ import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java index 78dc44338d6..d3907a6031d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java @@ -19,12 +19,9 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; -import org.apache.commons.io.FileUtils; -import org.apache.ozone.test.GenericTestUtils; -import org.jetbrains.annotations.NotNull; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -39,18 +36,9 @@ */ public class TestDirstreamClientHandler { + @TempDir private Path tmpDir; 
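
Several of these tests swap hand-rolled temporary directories (GenericTestUtils plus manual cleanup) for JUnit 5's `@TempDir`, as in the `tmpDir` field added just above. A small self-contained sketch of the two injection styles, with illustrative names:

```java
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirInjectionTest {

  // Field injection: JUnit creates a fresh directory before each test and
  // deletes it afterwards, so no @BeforeEach/@AfterEach cleanup is needed.
  @TempDir
  Path tmpDir;

  @Test
  void writesIntoManagedDirectory(@TempDir Path perTestDir) throws Exception {
    // Parameter injection gives an additional, independent directory.
    Path file = Files.write(tmpDir.resolve("data.txt"), "hello".getBytes(UTF_8));
    assertTrue(Files.exists(file));
    assertTrue(Files.isDirectory(perTestDir));
  }
}
```
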
- @BeforeEach - public void init() { - tmpDir = GenericTestUtils.getRandomizedTestDir().toPath(); - } - - @AfterEach - public void destroy() throws IOException { - FileUtils.deleteDirectory(tmpDir.toFile()); - } - @Test public void oneFileStream() throws IOException { @@ -129,7 +117,7 @@ public void splitContent() throws IOException { assertEquals("yyy", getContent("bsd.txt")); } - @NotNull + @Nonnull private String getContent(String name) throws IOException { return new String(Files.readAllBytes(tmpDir.resolve(name)), StandardCharsets.UTF_8); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 2a59eee8db9..383e76dcc72 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -73,7 +73,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index baa9fa4e67d..3a69c793c26 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -209,12 +209,6 @@ - - - - - - diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md index 1149eba2b0d..47c09a798fc 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.md @@ -48,7 +48,7 @@ the key to the Ozone Manager. Ozone manager returns the list of Ozone blocks that make up that key. An Ozone block contains the container ID and a local ID. The figure below -shows the logical layout out of Ozone block. +shows the logical layout of the Ozone block. ![Ozone Block](OzoneBlock.png) diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md index 31f6a428107..f58a4cd8f45 100644 --- a/hadoop-hdds/docs/content/concept/OzoneManager.md +++ b/hadoop-hdds/docs/content/concept/OzoneManager.md @@ -31,7 +31,7 @@ Ozone Manager (OM) is the namespace manager for Ozone. This means that when you want to write some data, you ask Ozone Manager for a block and Ozone Manager gives you a block and remembers that information. When you want to read that file back, you need to find the -address of the block and Ozone Manager returns it you. +address of the block and the Ozone Manager returns it to you. Ozone Manager also allows users to organize keys under a volume and bucket. Volumes and buckets are part of the namespace and managed by Ozone Manager. 
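
To make the volume/bucket/key hierarchy described in the Ozone Manager page concrete, here is a minimal client-side sketch. It assumes the standard Ozone RPC client API with illustrative volume, bucket, and key names; exact client calls may vary between releases:

```java
import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public final class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = "hello ozone".getBytes(UTF_8);
    try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol1");                 // volumes and buckets are OM namespace objects
      OzoneVolume volume = store.getVolume("vol1");
      volume.createBucket("bucket1");
      OzoneBucket bucket = volume.getBucket("bucket1");
      // OM hands out block locations; the client then writes to datanodes directly.
      try (OzoneOutputStream out = bucket.createKey("key1", data.length)) {
        out.write(data);
      }
    }
  }
}
```
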
diff --git a/hadoop-hdds/docs/content/concept/Recon.md b/hadoop-hdds/docs/content/concept/Recon.md index 235d6c314fb..608a37c39b4 100644 --- a/hadoop-hdds/docs/content/concept/Recon.md +++ b/hadoop-hdds/docs/content/concept/Recon.md @@ -153,7 +153,7 @@ ozone.recon.db.dir | none | Directory where the Recon Server stores its metadata ozone.recon.om.db.dir | none | Directory where the Recon Server stores its OM snapshot DB. ozone.recon.om.snapshot
.task.interval.delay | 10m | Interval in MINUTES by Recon to request OM DB Snapshot / delta updates. ozone.recon.task
.missingcontainer.interval | 300s | Time interval of the periodic check for Unhealthy Containers in the cluster. -ozone.recon.task
.safemode.wait.threshold | 300s | Max time for Recon to wait before it exit out of safe or warmup mode. +ozone.recon.task
.safemode.wait.threshold | 300s | Max time for Recon to wait before it exits out of safe or warmup mode. ozone.recon.sql.db.jooq.dialect | DERBY | Please refer to [SQL Dialect](https://www.jooq.org/javadoc/latest/org.jooq/org/jooq/SQLDialect.html) to specify a different dialect. ozone.recon.sql.db.jdbc.url | jdbc:derby:${ozone.recon.db.dir}
/ozone_recon_derby.db | Recon SQL database jdbc url. ozone.recon.sql.db.username | none | Recon SQL database username. diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.md index 8922f89bc5d..860e69a77da 100644 --- a/hadoop-hdds/docs/content/concept/StorageContainerManager.md +++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.md @@ -43,7 +43,7 @@ read and write these blocks directly. 2. SCM keeps track of all the block replicas. If there is a loss of data node or a disk, SCM -detects it and instructs data nodes make copies of the +detects it and instructs data nodes to make copies of the missing blocks to ensure high availability. 3. **SCM's Certificate Authority** is in diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md new file mode 100644 index 00000000000..ad959469b95 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md @@ -0,0 +1,96 @@ +--- +title: "Decommissioning" +weight: 1 +menu: + main: + parent: 特性 +summary: Decommissioning of SCM, OM and Datanode. +--- + + +# DataNode Decommission + +DataNode Decommission是从Ozone集群中删除现有DataNode的过程中,同时确保新数据不会被写入正在Decommission的DataNode。当你启动DataNode Decommission的操作时候,Ozone会自动确保在Decommission完成之前,该数据节点上的所有Storage containers都在另一个DataNode上创建了额外的副本。因此,DataNode在Decommission完成后可以继续运行,并可用于读取,但不能用于写入,直到手动停止DataNode的服务。 + +当我们启动Decommission时,这个操作首先要检查节点的当前状态,理想情况下应该是 "IN_SERVICE",然后将其状态更改为 "DECOMMISSIONING",并启动Decommission的流程: + +1. 首先它会触发一个事件,关闭节点上的所有Pipelines,同时关闭所有Containers。 + +2. 然后获取节点上的Container信息,并检查是否需要新的副本。如果需要,创建新的副本的任务就会被调度起来。 + +3. 复制任务被调度后,节点仍处于待处理状态,直到复制任务完成。 + +4. 在此阶段,节点将完成Decommission的过程,然后节点状态将更改为 "DECOMMISSIONED"。 + +要检查DataNode的当前状态,可以执行以下命令, +```shell +ozone admin datanode list +``` + +要decommission某台datanode的时候,可以执行下面的命令, + +```shell +ozone admin datanode decommission [-hV] [-id=] + [--scm=] [...] +``` +您可以输入多个主机,以便一起Decommission多个DataNode。 + +**Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令, +```shell +ozone admin datanode recommission [-hV] [-id=] + [--scm=] [...] +``` + +# OM Decommission + +Ozone Manager(OM)Decommissioning是指从 OM HA Ring 中从容地(gracefully)移除一个 OM 的过程。 + +要Decommission OM 并将这个节点从 OM HA ring中移除,需要执行以下步骤。 +1. 将要被Decommission的 OM 节点的 _OM NodeId_ 添加到所有其他 OM 的 _ozone-site.xml_ 中的 _ozone.om.decommissioned.nodes._ 属性中。 +2. 运行以下命令Decommission这台 OM 节点. +```shell +ozone admin om decommission -id= -nodeid= -hostname= [optional --force] +``` + _force选项将跳过检查 _ozone-site.xml_ 中的 OM 配置是否已更新,并将Decommission节点添加至 _**ozone.om.decommissioned.nodes**_ 配置中.

**Note -** 建议在Decommissioning一个 OM 节点之前bootstrap另一个 OM 节点,以保持OM的高可用性(HA).
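
For step 1 above, the decommissioned node id ends up in the `ozone.om.decommissioned.nodes` property of every remaining OM's ozone-site.xml. A hedged sketch of reading that list through the Hadoop-style configuration API (class and node names are illustrative, not the actual OM code):

```java
import java.util.Collection;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class DecommissionedOmCheck {
  public static void main(String[] args) {
    // Loads ozone-site.xml from the classpath, like other Ozone components.
    OzoneConfiguration conf = new OzoneConfiguration();
    // Comma-separated OM node ids listed under ozone.om.decommissioned.nodes.
    Collection<String> decommissioned =
        conf.getTrimmedStringCollection("ozone.om.decommissioned.nodes");
    String nodeId = args.length > 0 ? args[0] : "om3";
    System.out.println(nodeId + " decommissioned? " + decommissioned.contains(nodeId));
  }
}
```
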

+ +# SCM Decommission + +存储容器管理器 (SCM) Decommissioning 是允许您从容地(gracefully)将一个 SCM 从 SCM HA Ring 中移除的过程。 + +在Decommission一台SCM,并将其从SCM HA ring中移除时,需要执行以下步骤。 +```shell +ozone admin scm decommission [-hV] [--service-id=] -nodeid= +``` +执行以下命令可获得 "nodeId": **"ozone admin scm roles "** + +### Leader SCM +如果需要decommission **leader** SCM, 您必须先将leader的角色转移到另一个 scm,然后再Decommission这个节点。 + +您可以使用以下的命令来转移leader的角色, +```shell +ozone admin scm transfer [--service-id=] -n= +``` +在Leader的角色成功地转移之后,您可以继续decommission的操作。 + +### Primordial SCM +如果要decommission **primordial** scm,必须更改 _ozone.scm.primordial.node.id_ 的属性,使其指向不同的 SCM,然后再继续decommissioning。 + +### 注意 +在运行SCM decommissioning的操作期间,应手动删除decommissioned SCM的私钥。私钥可在 _hdds.metadata.dir_ 中找到。 + +在支持证书吊销之前(HDDS-8399),需要手动删除decommissioned SCM上的证书。 diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md index 3e4048c51c3..3872c387335 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.md @@ -23,13 +23,13 @@ summary: HA setup for Ozone Manager to avoid any single point of failure. limitations under the License. --> -Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm. +Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Manager* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm. To avoid any single point of failure the metadata-manager nodes also should have a HA setup. Both Ozone Manager and Storage Container Manager supports HA. In this mode the internal state is replicated via RAFT (with Apache Ratis) -This document explain the HA setup of Ozone Manager (OM) HA, please check [this page]({{< ref "SCM-HA" >}}) for SCM HA. While they can be setup for HA independently, a reliable, full HA setup requires enabling HA for both services. +This document explains the HA setup of Ozone Manager (OM) HA, please check [this page]({{< ref "SCM-HA" >}}) for SCM HA. While they can be setup for HA independently, a reliable, full HA setup requires enabling HA for both services. ## Ozone Manager HA @@ -104,18 +104,18 @@ hdfs dfs -ls ofs://cluster1/volume/bucket/prefix/ Raft can guarantee the replication of any request if the request is persisted to the RAFT log on the majority of the nodes. To achieve high throughput with Ozone Manager, it returns with the response even if the request is persisted only to the RAFT logs. -RocksDB instance are updated by a background thread with batching transactions (so called "double buffer" as when one of the buffers is used to commit the data the other one collects all the new requests for the next commit.) To make all data available for the next request even if the background process is not yet wrote them the key data is cached in the memory. +RocksDB instance are updated by a background thread with batching transactions (so called "double buffer" as when one of the buffers is used to commit the data the other one collects all the new requests for the next commit.) To make all data available for the next request even if the background process has not yet written them, the key data is cached in the memory. 
![HA - OM Double Buffer](HA-OM-doublebuffer.png) -The details of this approach discussed in a separated [design doc]({{< ref "design/omha.md" >}}) but it's integral part of the OM HA design. +The details of this approach are discussed in a separate [design doc]({{< ref "design/omha.md" >}}) but it's an integral part of the OM HA design. ## OM Bootstrap To convert a non-HA OM to be HA or to add new OM nodes to existing HA OM ring, new OM node(s) need to be bootstrapped. Before bootstrapping a new OM node, all the existing OM's on-disk configuration file (ozone-site.xml) must be updated with the configuration details -of the new OM such nodeId, address, port etc. Note that the existing OM's need not be restarted. They will reload the configuration from disk when +of the new OM such as nodeId, address, port etc. Note that the existing OM's need not be restarted. They will reload the configuration from disk when they receive a bootstrap request from the bootstrapping node. To bootstrap an OM, the following command needs to be run: diff --git a/hadoop-hdds/docs/content/feature/Topology.zh.md b/hadoop-hdds/docs/content/feature/Topology.zh.md new file mode 100644 index 00000000000..a366e3a2473 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Topology.zh.md @@ -0,0 +1,108 @@ +--- +title: "拓扑感知能力" +weight: 2 +menu: + main: + parent: 特性 +summary: 机架感知配置可以提高读/写性能 +--- + + +Ozone可以使用拓扑相关信息(例如机架位置)来优化读写管道。要获得完全的机架感知集群,Ozone需要三种不同的配置。 + + 1. 拓扑信息应由 Ozone 配置。 + 2. 当Ozone为特定管道/容器选择3个不同的数据节点时,拓扑相关信息就会被使用.(写入) + 3. 当Ozone读取一个Key时,它应该优先从最近的节点读取。 + + + +## 拓扑层次结构 + +拓扑层次结构可使用 net.topology.node.switch.mapping.impl 配置键进行配置。此配置应定义 org.apache.hadoop.net.CachedDNSToSwitchMapping 的实现。由于这是一个 Hadoop 类,因此该配置与 Hadoop 配置完全相同。 + +### 静态列表 + +静态列表可借助 ```TableMapping``` 进行配置:: + +```XML + + net.topology.node.switch.mapping.impl + org.apache.hadoop.net.TableMapping + + + net.topology.table.file.name + /opt/hadoop/compose/ozone-topology/network-config + +``` + +第二个配置选项应指向一个文本文件。文件格式为两列文本文件,各列之间用空格隔开。第一列是 IP 地址,第二列指定地址映射的机架。如果找不到与集群中主机相对应的条目,则会使用 /default-rack。 + +### 动态列表 + +机架信息可借助外部脚本识别: + + +```XML + + net.topology.node.switch.mapping.impl + org.apache.hadoop.net.ScriptBasedMapping + + + net.topology.script.file.name + /usr/local/bin/rack.sh + +``` + +如果使用外部脚本,则需要在配置文件中使用 net.topology.script.file.name 参数来指定。与 java 类不同,外部拓扑脚本不包含在 Ozone 发行版中,而是由管理员提供。Fork 拓扑脚本时,Ozone 会向 ARGV 发送多个 IP 地址。发送给拓扑脚本的 IP 地址数量由 net.topology.script.number.args 控制,默认为 100。如果将 net.topology.script.number.args 改为 1,则每个提交的 IP 地址都会Fork一个拓扑脚本。 + +## 写入路径 + +CLOSED容器放置可以通过 `ozone.scm.container.placement.impl` 配置键进行配置。 可用的容器放置策略可在 `org.apache.hdds.scm.container.placement` 包中找到。[包](https://github.com/apache/ozone/tree/master/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms). 
+ +默认情况下, CLOSED容器使用 `SCMContainerPlacementRandom` 放置策略,该策略不支持拓扑感知。为了启用拓扑感知,可配置 `SCMContainerPlacementRackAware` 作为CLOSED容器放置策略: + +```XML + + ozone.scm.container.placement.impl + org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware + +``` + +这种放置策略符合 HDFS 中使用的算法。在默认的 3 个副本中,两个副本位于同一个机架上,第三个副本位于不同的机架上。 + +这种实现方式适用于"/机架/节点 "这样的网络拓扑结构。如果网络拓扑结构的层数较多,则不建议使用此方法。 + +## 读取路径 + +最后,读取路径也应配置为从最近的 pipeline 读取数据。 + +```XML + + ozone.network.topology.aware.read + true + +``` + +## 参考文献 + + * 关于 `net.topology.node.switch.mapping.impl` 的 Hadoop 文档: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/RackAwareness.html + * [设计文档]({{< ref path="design/topology.md" lang="en">}}) \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md new file mode 100644 index 00000000000..cd3eb5fbdc5 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md @@ -0,0 +1,70 @@ +--- +title: "在DataNode上合并Container的RocksDB" +weight: 2 +menu: + main: + parent: 特性 +summary: Ozone DataNode Container模式简介V3 +--- + + +在 Ozone 中,用户数据被分割成blocks并存储在 HDDS Container中。Container是 Ozone/HDDS 的基本复制单元。每个Container都有自己的元数据和数据, 数据以文件形式保存在磁盘上,元数据保存在RocksDB中。 + +目前,数据节点上的每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 + +与当前使用方法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 + +## 配置 + +这主要是DataNode的功能,不需要太多配置。 + +如果更倾向于为每个Container使用一个RocksDB的模式,那么这下面的配置可以禁用上面所介绍的功能。请注意,一旦启用该功能,强烈建议以后不要再禁用。 + +```XML + + hdds.datanode.container.schema.v3.enabled + false + Disable or enable this feature. + +``` + +无需任何特殊配置,单个RocksDB将会被创建在"hdds.datanode.dir"中所配置的数据卷下。 + +对于一些有高性能要求的高级集群管理员,他/她可以利用快速存储来保存RocksDB。在这种情况下,请配置下面这两个属性。 + +```XML + + hdds.datanode.container.db.dir + + This setting is optional. Specify where the per-disk rocksdb instances will be stored. + + + hdds.datanode.failed.db.volumes.tolerated + -1 + The number of db volumes that are allowed to fail before a datanode stops offering service. + Default -1 means unlimited, but we should have at least one good volume left. + +``` + +### 向后兼容性 + +Existing containers each has one RocksDB for them will be still accessible after this feature is enabled. All container data will co-exist in an existing Ozone cluster. 
+ +## 参考文献 + + * [设计文档]({{< ref path="design/dn-merge-rocksdb.md" lang="en">}}) \ No newline at end of file diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index a632c65254c..14511a160ce 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java index fcdbacbec10..f4e17945194 100644 --- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java +++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java @@ -31,9 +31,10 @@ import java.util.List; import java.util.Set; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test CodecRegistry. @@ -44,10 +45,8 @@ public class TestCodecRegistry { public void testGetCodecs() { Set codecs = CodecRegistry.getInstance().getCodecNames(); assertEquals(2, codecs.size()); - assertTrue( - codecs.contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase())); - assertTrue( - codecs.contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase())); + assertThat(codecs).contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase()); + assertThat(codecs).contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase()); } @Test @@ -55,14 +54,14 @@ public void testGetCoders() { List coders = CodecRegistry.getInstance(). getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase()); assertEquals(2, coders.size()); - assertTrue(coders.get(0) instanceof NativeRSRawErasureCoderFactory); - assertTrue(coders.get(1) instanceof RSRawErasureCoderFactory); + assertInstanceOf(NativeRSRawErasureCoderFactory.class, coders.get(0)); + assertInstanceOf(RSRawErasureCoderFactory.class, coders.get(1)); coders = CodecRegistry.getInstance(). getCoders(ECReplicationConfig.EcCodec.XOR.name().toLowerCase()); assertEquals(2, coders.size()); - assertTrue(coders.get(0) instanceof NativeXORRawErasureCoderFactory); - assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory); + assertInstanceOf(NativeXORRawErasureCoderFactory.class, coders.get(0)); + assertInstanceOf(XORRawErasureCoderFactory.class, coders.get(1)); } @Test @@ -108,8 +107,8 @@ public String getCodecName() { List rsCoders = CodecRegistry.getInstance(). getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase()); assertEquals(2, rsCoders.size()); - assertTrue(rsCoders.get(0) instanceof NativeRSRawErasureCoderFactory); - assertTrue(rsCoders.get(1) instanceof RSRawErasureCoderFactory); + assertInstanceOf(NativeRSRawErasureCoderFactory.class, rsCoders.get(0)); + assertInstanceOf(RSRawErasureCoderFactory.class, rsCoders.get(1)); // check RS coder names String[] rsCoderNames = CodecRegistry.getInstance(). @@ -139,21 +138,21 @@ public void testGetCoderByName() { RawErasureCoderFactory coder = CodecRegistry.getInstance(). 
getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(), RSRawErasureCoderFactory.CODER_NAME); - assertTrue(coder instanceof RSRawErasureCoderFactory); + assertInstanceOf(RSRawErasureCoderFactory.class, coder); coder = CodecRegistry.getInstance() .getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(), NativeRSRawErasureCoderFactory.CODER_NAME); - assertTrue(coder instanceof NativeRSRawErasureCoderFactory); + assertInstanceOf(NativeRSRawErasureCoderFactory.class, coder); coder = CodecRegistry.getInstance() .getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(), XORRawErasureCoderFactory.CODER_NAME); - assertTrue(coder instanceof XORRawErasureCoderFactory); + assertInstanceOf(XORRawErasureCoderFactory.class, coder); coder = CodecRegistry.getInstance() .getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(), NativeXORRawErasureCoderFactory.CODER_NAME); - assertTrue(coder instanceof NativeXORRawErasureCoderFactory); + assertInstanceOf(NativeXORRawErasureCoderFactory.class, coder); } } diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java index 1f8cff43385..8bea0bf7b9c 100644 --- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java +++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java @@ -24,9 +24,9 @@ import java.io.IOException; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** @@ -93,13 +93,9 @@ protected void testCoding(boolean usingDirectBuffer) { protected void testCodingWithBadInput(boolean usingDirectBuffer) { this.usingDirectBuffer = usingDirectBuffer; prepareCoders(true); - - try { - performTestCoding(baseChunkSize, false, true, false); - fail("Encoding test with bad input should fail"); - } catch (Exception e) { - // Expected - } + assertThrows(Exception.class, + () -> performTestCoding(baseChunkSize, false, true, false), + "Encoding test with bad input should fail"); } /** @@ -109,13 +105,9 @@ protected void testCodingWithBadInput(boolean usingDirectBuffer) { protected void testCodingWithBadOutput(boolean usingDirectBuffer) { this.usingDirectBuffer = usingDirectBuffer; prepareCoders(true); - - try { - performTestCoding(baseChunkSize, false, false, true); - fail("Decoding test with bad output should fail"); - } catch (Exception e) { - // Expected - } + assertThrows(Exception.class, + () -> performTestCoding(baseChunkSize, false, false, true), + "Decoding test with bad output should fail"); } /** @@ -132,30 +124,19 @@ void testAfterRelease() throws Exception { final ECChunk[] parity = prepareParityChunksForEncoding(); IOException ioException = assertThrows(IOException.class, () -> encoder.encode(data, parity)); - assertTrue(ioException.getMessage().contains("closed")); + assertThat(ioException.getMessage()).contains("closed"); decoder.release(); final ECChunk[] in = prepareInputChunksForDecoding(data, parity); final ECChunk[] out = prepareOutputChunksForDecoding(); ioException = assertThrows(IOException.class, () -> decoder.decode(in, getErasedIndexesForDecoding(), out)); - assertTrue(ioException.getMessage().contains("closed")); + 
assertThat(ioException.getMessage()).contains("closed"); } @Test public void testCodingWithErasingTooMany() { - try { - testCoding(true); - fail("Decoding test erasing too many should fail"); - } catch (Exception e) { - // Expected - } - - try { - testCoding(false); - fail("Decoding test erasing too many should fail"); - } catch (Exception e) { - // Expected - } + assertThrows(Exception.class, () -> testCoding(true), "Decoding test erasing too many should fail"); + assertThrows(Exception.class, () -> testCoding(false), "Decoding test erasing too many should fail"); } @Test diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 8ad0d11d021..5ead355066d 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false @@ -113,7 +112,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 330cfae30b2..84a0fa4886c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto; @@ -55,6 +56,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto; @@ -89,6 +93,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesResponseProto; +import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto; @@ -114,6 +120,7 @@ import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.ProtobufUtils; import java.io.Closeable; import java.io.IOException; @@ -123,6 +130,7 @@ import java.util.Map; import java.util.Optional; import java.util.function.Consumer; +import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto.Status.CONTAINER_ALREADY_CLOSED; @@ -455,6 +463,23 @@ public void deleteContainer(long containerID) } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + GetContainersOnDecomNodeRequestProto request = GetContainersOnDecomNodeRequestProto.newBuilder() + .setDatanodeDetails(dn.getProtoBufMessage()).build(); + GetContainersOnDecomNodeResponseProto response = submitRequest(Type.GetContainersOnDecomNode, + builder -> builder.setGetContainersOnDecomNodeRequest(request)).getGetContainersOnDecomNodeResponse(); + Map> containerMap = new HashMap<>(); + for (ContainersOnDecomNodeProto containersProto : response.getContainersOnDecomNodeList()) { + List containerIds = new ArrayList<>(); + for (HddsProtos.ContainerID id : containersProto.getIdList()) { + containerIds.add(ContainerID.getFromProtobuf(id)); + } + containerMap.put(containersProto.getName(), containerIds); + } + return containerMap; + } + /** * Queries a list of Nodes based on their operational state or health state. * Passing a null for either value acts as a wildcard for that state. @@ -486,6 +511,18 @@ public List queryNode( return response.getDatanodesList(); } + @Override + public HddsProtos.Node queryNode(UUID uuid) throws IOException { + SingleNodeQueryRequestProto request = SingleNodeQueryRequestProto.newBuilder() + .setUuid(ProtobufUtils.toProtobuf(uuid)) + .build(); + SingleNodeQueryResponseProto response = + submitRequest(Type.SingleNodeQuery, + builder -> builder.setSingleNodeQueryRequest(request)) + .getSingleNodeQueryResponse(); + return response.getDatanode(); + } + /** * Attempts to decommission the list of nodes. 
* @param nodes The list of hostnames or hostname:ports to decommission diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java index 08ed39d7f4b..5bc9cd9d06c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.util.UUID; /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java index 3a1df1bd865..b78604643e5 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.Serializable; import java.security.cert.X509Certificate; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java index d565eedae43..765cf96e2f9 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java @@ -46,7 +46,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.apache.hadoop.hdds.function.Predicates.yesBi; import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth; import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth; @@ -61,9 +60,9 @@ public class DefaultProfile implements PKIProfile { private static final BiPredicate VALIDATE_KEY_USAGE = DefaultProfile::validateKeyUsage; private static final BiPredicate - VALIDATE_AUTHORITY_KEY_IDENTIFIER = yesBi(); - private static final BiPredicate VALIDATE_LOGO_TYPE = - yesBi(); + VALIDATE_AUTHORITY_KEY_IDENTIFIER = (t, u) -> true; + private static final BiPredicate VALIDATE_LOGO_TYPE + = (t, u) -> true; private static final Logger LOG = LoggerFactory.getLogger(DefaultProfile.class); private static final BiPredicate diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 403295aebf2..d3db81c71b6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -245,8 +245,11 @@ private synchronized void readCertificateFile(Path filePath) { 
updateCachedData(fileName, CAType.SUBORDINATE, this::updateCachedSubCAId); updateCachedData(fileName, CAType.ROOT, this::updateCachedRootCAId); - getLogger().info("Added certificate {} from file: {}.", cert, + getLogger().info("Added certificate {} from file: {}.", readCertSerialId, filePath.toAbsolutePath()); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Certificate: {}", cert); + } } catch (java.security.cert.CertificateException | IOException | IndexOutOfBoundsException e) { getLogger().error("Error reading certificate from file: {}.", @@ -487,7 +490,6 @@ private X509Certificate getCertificateFromScm(String certId) * @param data - Data to sign. * @throws CertificateException - on Error. */ - @Override public byte[] signData(byte[] data) throws CertificateException { try { Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), @@ -582,7 +584,6 @@ public CertificateSignRequest.Builder getCSRBuilder() * @param caType - Is CA certificate. * @throws CertificateException - on Error. */ - @Override public void storeCertificate(String pemEncodedCert, CAType caType) throws CertificateException { CertificateCodec certificateCodec = new CertificateCodec(securityConfig, @@ -992,7 +993,6 @@ public List getCAList() { } } - @Override public List listCA() throws IOException { pemEncodedCACertsLock.lock(); try { @@ -1024,8 +1024,7 @@ public List updateCAList() throws IOException { public synchronized KeyStoresFactory getServerKeyStoresFactory() throws CertificateException { if (serverKeyStoresFactory == null) { - serverKeyStoresFactory = SecurityUtil.getServerKeyStoresFactory( - securityConfig, this, true); + serverKeyStoresFactory = SecurityUtil.getServerKeyStoresFactory(this, true); } return serverKeyStoresFactory; } @@ -1034,8 +1033,7 @@ public synchronized KeyStoresFactory getServerKeyStoresFactory() public KeyStoresFactory getClientKeyStoresFactory() throws CertificateException { if (clientKeyStoresFactory == null) { - clientKeyStoresFactory = SecurityUtil.getClientKeyStoresFactory( - securityConfig, this, true); + clientKeyStoresFactory = SecurityUtil.getClientKeyStoresFactory(this, true); } return clientKeyStoresFactory; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java index 02a9d12ebda..134c841e697 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto2Codec; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.security.cert.CRLException; import java.security.cert.X509CRL; @@ -139,7 +139,7 @@ public Instant getRevocationTime() { * from being compared to this object. 
*/ @Override - public int compareTo(@NotNull CRLInfo o) { + public int compareTo(@Nonnull CRLInfo o) { return this.compare(this, o); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java index 5f34e8dfe03..96fb2a7fd91 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java @@ -95,10 +95,10 @@ public static PublicKey getPublicKey(byte[] encodedKey, } public static KeyStoresFactory getServerKeyStoresFactory( - SecurityConfig securityConfig, CertificateClient client, + CertificateClient client, boolean requireClientAuth) throws CertificateException { PemFileBasedKeyStoresFactory factory = - new PemFileBasedKeyStoresFactory(securityConfig, client); + new PemFileBasedKeyStoresFactory(client); try { factory.init(KeyStoresFactory.Mode.SERVER, requireClientAuth); } catch (IOException | GeneralSecurityException e) { @@ -109,10 +109,10 @@ public static KeyStoresFactory getServerKeyStoresFactory( } public static KeyStoresFactory getClientKeyStoresFactory( - SecurityConfig securityConfig, CertificateClient client, + CertificateClient client, boolean requireClientAuth) throws CertificateException { PemFileBasedKeyStoresFactory factory = - new PemFileBasedKeyStoresFactory(securityConfig, client); + new PemFileBasedKeyStoresFactory(client); try { factory.init(KeyStoresFactory.Mode.CLIENT, requireClientAuth); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java index 9928d90570f..997bdf6cf2e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java @@ -20,7 +20,7 @@ import com.google.protobuf.ByteString; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; /** * Codec to serialize/deserialize a {@link ByteString}. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index fe495e7b061..32fcbfec6e4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -83,6 +83,9 @@ public final class DBStoreBuilder { // The column family options that will be used for any column families // added by name only (without specifying options). 
private ManagedColumnFamilyOptions defaultCfOptions; + // Initialize the Statistics instance if ROCKSDB_STATISTICS enabled + private ManagedStatistics statistics; + private String dbname; private Path dbPath; private String dbJmxBeanNameName; @@ -188,6 +191,11 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) { if (maxNumberOfOpenFiles != null) { dbOptions.setMaxOpenFiles(maxNumberOfOpenFiles); } + if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { + statistics = new ManagedStatistics(); + statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); + dbOptions.setStatistics(statistics); + } } /** @@ -217,7 +225,7 @@ public DBStore build() throws IOException { throw new IOException("The DB destination directory should exist."); } - return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs, + return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs, registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName, enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs, configuration, threadNamePrefix); @@ -413,13 +421,6 @@ protected void log(InfoLogLevel infoLogLevel, String s) { dbOptions.setWalTtlSeconds(rocksDBConfiguration.getWalTTL()); dbOptions.setWalSizeLimitMB(rocksDBConfiguration.getWalSizeLimit()); - // Create statistics. - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - ManagedStatistics statistics = new ManagedStatistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - dbOptions.setStatistics(statistics); - } - return dbOptions; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index 71cd3716e56..6760eb47f48 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -26,7 +26,6 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -37,6 +36,7 @@ import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily; import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics; import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; @@ -78,10 +78,11 @@ public class RDBStore implements DBStore { // number in request to avoid increase in heap memory. 
private final long maxDbUpdatesSizeThreshold; private final ManagedDBOptions dbOptions; + private final ManagedStatistics statistics; private final String threadNamePrefix; @SuppressWarnings("parameternumber") - public RDBStore(File dbFile, ManagedDBOptions dbOptions, + public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics, ManagedWriteOptions writeOptions, Set families, CodecRegistry registry, boolean readOnly, int maxFSSnapshots, String dbJmxBeanName, boolean enableCompactionDag, @@ -98,6 +99,7 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, codecRegistry = registry; dbLocation = dbFile; this.dbOptions = dbOptions; + this.statistics = statistics; try { if (enableCompactionDag) { @@ -120,8 +122,8 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, if (dbJmxBeanName == null) { dbJmxBeanName = dbFile.getName(); } - metrics = RocksDBStoreMetrics.create(dbOptions.statistics(), db, - dbJmxBeanName); + // Use statistics instead of dbOptions.statistics() to avoid repeated init. + metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName); if (metrics == null) { LOG.warn("Metrics registration failed during RocksDB init, " + "db path :{}", dbJmxBeanName); @@ -198,6 +200,7 @@ public String getSnapshotsParentDir() { return snapshotsParentDir; } + @Override public RocksDBCheckpointDiffer getRocksDBCheckpointDiffer() { return rocksDBCheckpointDiffer; } @@ -231,6 +234,9 @@ public void close() throws IOException { RocksDBCheckpointDifferHolder .invalidateCacheEntry(rocksDBCheckpointDiffer.getMetadataDir()); } + if (statistics != null) { + IOUtils.close(LOG, statistics); + } IOUtils.close(LOG, db); } @@ -344,13 +350,7 @@ public File getDbLocation() { @Override public Map getTableNames() { - Map tableNames = new HashMap<>(); - StringCodec stringCodec = StringCodec.get(); - - for (ColumnFamily columnFamily : getColumnFamilies()) { - tableNames.put(columnFamily.getID(), columnFamily.getName(stringCodec)); - } - return tableNames; + return db.getColumnFamilyNames(); } public Collection getColumnFamilies() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index 4dd1042fde2..19f60d914f3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; import org.apache.ozone.rocksdiff.RocksDiffUtils; import org.apache.ratis.util.UncheckedAutoCloseable; +import org.apache.ratis.util.MemoizedSupplier; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.Holder; @@ -51,7 +52,6 @@ import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -66,7 +66,6 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions.closeDeeply; import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator.managed; import static org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator.managed; @@ -86,10 +85,14 @@ public final class RocksDatabase 
implements Closeable { } private static final ManagedReadOptions DEFAULT_READ_OPTION = new ManagedReadOptions(); - private static Map> dbNameToCfHandleMap = - new HashMap<>(); - private final StackTraceElement[] stackTrace; + static String bytes2String(byte[] bytes) { + return StringCodec.get().fromPersistedFormat(bytes); + } + + static String bytes2String(ByteBuffer bytes) { + return StringCodec.get().decode(bytes); + } static IOException toIOException(Object name, String op, RocksDBException e) { return HddsServerUtil.toIOException(name + ": Failed to " + op, e); @@ -158,15 +161,7 @@ static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions, db = ManagedRocksDB.open(dbOptions, dbFile.getAbsolutePath(), descriptors, handles); } - dbNameToCfHandleMap.put(db.get().getName(), handles); - // init a column family map. - AtomicLong counter = new AtomicLong(0); - for (ColumnFamilyHandle h : handles) { - final ColumnFamily f = new ColumnFamily(h, counter); - columnFamilies.put(f.getName(), f); - } - return new RocksDatabase(dbFile, db, dbOptions, writeOptions, - descriptors, Collections.unmodifiableMap(columnFamilies), counter); + return new RocksDatabase(dbFile, db, dbOptions, writeOptions, descriptors, handles); } catch (RocksDBException e) { close(columnFamilies, db, descriptors, writeOptions, dbOptions); throw toIOException(RocksDatabase.class, "open " + dbFile, e); @@ -260,17 +255,13 @@ public void close() throws IOException { * * @see ColumnFamilyHandle */ - public static final class ColumnFamily { + public final class ColumnFamily { private final byte[] nameBytes; - private AtomicLong counter; private final String name; private final ColumnFamilyHandle handle; - private AtomicBoolean isClosed = new AtomicBoolean(false); - public ColumnFamily(ColumnFamilyHandle handle, AtomicLong counter) - throws RocksDBException { + private ColumnFamily(ColumnFamilyHandle handle) throws RocksDBException { this.nameBytes = handle.getName(); - this.counter = counter; this.name = bytes2String(nameBytes); this.handle = handle; LOG.debug("new ColumnFamily for {}", name); @@ -289,10 +280,6 @@ public ColumnFamilyHandle getHandle() { return handle; } - public int getID() { - return getHandle().getID(); - } - public void batchDelete(ManagedWriteBatch writeBatch, byte[] key) throws IOException { try (UncheckedAutoCloseable ignored = acquire()) { @@ -331,10 +318,6 @@ public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key, } } - public void markClosed() { - isClosed.set(true); - } - private UncheckedAutoCloseable acquire() throws IOException { if (isClosed.get()) { throw new IOException("Rocks Database is closed"); @@ -353,27 +336,49 @@ public String toString() { } private final String name; + private final Throwable creationStackTrace = new Throwable("Object creation stack trace"); + private final ManagedRocksDB db; private final ManagedDBOptions dbOptions; private final ManagedWriteOptions writeOptions; private final List descriptors; + /** column family names -> {@link ColumnFamily}. */ private final Map columnFamilies; + /** {@link ColumnFamilyHandle#getID()} -> column family names. */ + private final Supplier> columnFamilyNames; private final AtomicBoolean isClosed = new AtomicBoolean(); - private final AtomicLong counter; + /** Count the number of operations running concurrently. 
*/ + private final AtomicLong counter = new AtomicLong(); private RocksDatabase(File dbFile, ManagedRocksDB db, ManagedDBOptions dbOptions, ManagedWriteOptions writeOptions, - List descriptors, - Map columnFamilies, AtomicLong counter) { + List descriptors, List handles) throws RocksDBException { this.name = getClass().getSimpleName() + "[" + dbFile + "]"; this.db = db; this.dbOptions = dbOptions; this.writeOptions = writeOptions; this.descriptors = descriptors; - this.columnFamilies = columnFamilies; - this.counter = counter; - this.stackTrace = Thread.currentThread().getStackTrace(); + this.columnFamilies = toColumnFamilyMap(handles); + this.columnFamilyNames = MemoizedSupplier.valueOf(() -> toColumnFamilyNameMap(columnFamilies.values())); + } + + private Map toColumnFamilyMap(List handles) throws RocksDBException { + final Map map = new HashMap<>(); + for (ColumnFamilyHandle h : handles) { + final ColumnFamily f = new ColumnFamily(h); + map.put(f.getName(), f); + } + return Collections.unmodifiableMap(map); + } + + private static Map toColumnFamilyNameMap(Collection families) { + return Collections.unmodifiableMap(families.stream() + .collect(Collectors.toMap(f -> f.getHandle().getID(), ColumnFamily::getName))); + } + + Map getColumnFamilyNames() { + return columnFamilyNames.get(); } @Override @@ -389,10 +394,6 @@ private void close(boolean isSync) { // Then close all attached listeners dbOptions.listeners().forEach(listener -> listener.close()); - if (columnFamilies != null) { - columnFamilies.values().stream().forEach(f -> f.markClosed()); - } - if (isSync) { waitAndClose(); return; @@ -579,20 +580,9 @@ public void compactRange(String cfName) throws IOException { } } - private ColumnFamilyHandle getColumnFamilyHandle(String cfName) - throws IOException { - for (ColumnFamilyHandle cf : getCfHandleMap().get(db.get().getName())) { - try { - String table = new String(cf.getName(), UTF_8); - if (cfName.equals(table)) { - return cf; - } - } catch (RocksDBException e) { - closeOnError(e); - throw toIOException(this, "columnFamilyHandle.getName", e); - } - } - return null; + private ColumnFamilyHandle getColumnFamilyHandle(String columnFamilyName) { + final ColumnFamily columnFamily = getColumnFamily(columnFamilyName); + return columnFamily != null ? 
columnFamily.getHandle() : null; } public void compactRange(ColumnFamily family, final byte[] begin, @@ -896,20 +886,10 @@ public void deleteFilesNotMatchingPrefix(Map prefixPairs) } } - public static Map> getCfHandleMap() { - return dbNameToCfHandleMap; - } - @Override protected void finalize() throws Throwable { if (!isClosed()) { - String warning = "RocksDatabase is not closed properly."; - if (LOG.isDebugEnabled()) { - String debugMessage = String.format("%n StackTrace for unclosed " + - "RocksDatabase instance: %s", Arrays.toString(stackTrace)); - warning = warning.concat(debugMessage); - } - LOG.warn(warning); + LOG.warn("RocksDatabase {} is not closed properly.", name, creationStackTrace); } super.finalize(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java index 8e8abdcc7a8..539bf8a29c4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java @@ -28,6 +28,7 @@ import java.util.Objects; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.TableCacheMetrics; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -165,10 +166,17 @@ public void put(KEY key, VALUE value) throws IOException { public void putWithBatch(BatchOperation batch, KEY key, VALUE value) throws IOException { if (supportCodecBuffer) { - // The buffers will be released after commit. - rawTable.putWithBatch(batch, - keyCodec.toDirectCodecBuffer(key), - valueCodec.toDirectCodecBuffer(value)); + CodecBuffer keyBuffer = null; + CodecBuffer valueBuffer = null; + try { + keyBuffer = keyCodec.toDirectCodecBuffer(key); + valueBuffer = valueCodec.toDirectCodecBuffer(value); + // The buffers will be released after commit. + rawTable.putWithBatch(batch, keyBuffer, valueBuffer); + } catch (Exception e) { + IOUtils.closeQuietly(valueBuffer, keyBuffer); + throw e; + } } else { rawTable.putWithBatch(batch, encodeKey(key), encodeValue(value)); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java index f52e739e3e4..a102e94aea7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java @@ -18,15 +18,14 @@ package org.apache.hadoop.hdds.utils.db.cache; -import com.google.common.base.Optional; - import java.util.Objects; /** * CacheValue for the RocksDB Table. * @param */ -public class CacheValue { +public final class CacheValue { + /** @return a {@link CacheValue} with a non-null value. */ public static CacheValue get(long epoch, V value) { Objects.requireNonNull(value, "value == null"); @@ -48,16 +47,6 @@ private CacheValue(long epoch, VALUE value) { this.epoch = epoch; } - /** - * @deprecated - * use {@link #get(long, Object)} or {@link #get(long)}. 
- */ - @Deprecated - public CacheValue(Optional<VALUE> value, long epoch) { - this.value = value.orNull(); - this.epoch = epoch; - } - public VALUE getCacheValue() { return value; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java index 33ad7086987..042887e4e53 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java @@ -49,6 +49,7 @@ public class AuditLogger { private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker(); private static final Marker READ_MARKER = AuditMarker.READ.getMarker(); private static final Marker AUTH_MARKER = AuditMarker.AUTH.getMarker(); + private static final Marker PERFORMANCE = AuditMarker.PERFORMANCE.getMarker(); private final AtomicReference<Set<String>> debugCmdSetRef = new AtomicReference<>(new HashSet<>()); public static final String AUDIT_LOG_DEBUG_CMD_LIST_PREFIX = @@ -118,6 +119,10 @@ public void logWrite(AuditMessage auditMessage) { } } + public void logPerformance(AuditMessage msg) { + this.logger.logIfEnabled(FQCN, Level.INFO, PERFORMANCE, msg, null); + } + public void refreshDebugCmdSet() { OzoneConfiguration conf = new OzoneConfiguration(); refreshDebugCmdSet(conf); @@ -161,6 +166,22 @@ public void appendOpLatencyNanos(long nanos) { append("opLatencyMs", TimeUnit.NANOSECONDS.toMillis(nanos)); } + /** + * Appends pre-operation latency in milliseconds. + * @param millis Latency in milliseconds. + */ + public void appendPreOpLatencyMs(long millis) { + append("preOpLatencyMs", millis); + } + + /** + * Appends whole operation latency in milliseconds. + * @param millis Latency in milliseconds. + */ + public void appendOpLatencyMs(long millis) { + append("opLatencyMs", millis); + } + /** * Appends the size in bytes. * @param bytes Size in bytes.
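For reference, the new PERFORMANCE marker relies on Log4j2's marker mechanism: logPerformance() logs at INFO with that marker, so the logging configuration can route these records to a dedicated appender without touching the READ/WRITE/AUTH audit streams. The stand-alone Java sketch below only illustrates that marker-plus-logIfEnabled mechanism; the class name, message text and timing values are made up for the example and are not part of this change (Ozone itself passes an AuditMessage, as shown in the hunk above).

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;
import org.apache.logging.log4j.spi.ExtendedLogger;

// Stand-alone illustration of marker-based logging as used by AuditLogger#logPerformance:
// one logger, one PERFORMANCE marker, and logIfEnabled() so the call stays cheap
// whenever the marker is filtered out by the logging configuration.
public final class PerformanceMarkerSketch {
  private static final String FQCN = PerformanceMarkerSketch.class.getName();
  private static final Marker PERFORMANCE = MarkerManager.getMarker("PERFORMANCE");
  private static final ExtendedLogger LOGGER =
      LogManager.getContext(false).getLogger(PerformanceMarkerSketch.class.getName());

  public static void main(String[] args) {
    // Hard-coded latencies, purely for the demo.
    LOGGER.logIfEnabled(FQCN, Level.INFO, PERFORMANCE,
        "op=CreateKey | preOpLatencyMs=3 | opLatencyMs=17", null);
  }
}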
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java index d9aed2bb65f..2e991a6ea2e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java @@ -25,7 +25,8 @@ public enum AuditMarker { WRITE(MarkerManager.getMarker("WRITE")), READ(MarkerManager.getMarker("READ")), - AUTH(MarkerManager.getMarker("AUTH")),; + AUTH(MarkerManager.getMarker("AUTH")), + PERFORMANCE(MarkerManager.getMarker("PERFORMANCE")); private Marker marker; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java index df16178e64b..ad95ce1632d 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/conf/TestHddsConfServlet.java @@ -19,9 +19,9 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -170,12 +170,8 @@ public void testWriteXml() throws Exception { @Test public void testBadFormat() throws Exception { StringWriter sw = new StringWriter(); - try { - HddsConfServlet.writeResponse(getTestConf(), sw, "not a format", null); - fail("writeResponse with bad format didn't throw!"); - } catch (HddsConfServlet.BadFormatException bfe) { - // expected - } + assertThrows(HddsConfServlet.BadFormatException.class, + () -> HddsConfServlet.writeResponse(getTestConf(), sw, "not a format", null)); assertEquals("", sw.toString()); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/ssl/TestPemFileBasedKeyStoresFactory.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/ssl/TestPemFileBasedKeyStoresFactory.java index 0034890a68c..6efb93c93d6 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/ssl/TestPemFileBasedKeyStoresFactory.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/ssl/TestPemFileBasedKeyStoresFactory.java @@ -74,7 +74,7 @@ public void setup() throws Exception { @ParameterizedTest public void testInit(boolean clientAuth) throws Exception { KeyStoresFactory keyStoresFactory = new PemFileBasedKeyStoresFactory( - secConf, caClient); + caClient); try { keyStoresFactory.init(KeyStoresFactory.Mode.CLIENT, clientAuth); assertEquals(clientAuth, keyStoresFactory.getKeyManagers()[0] @@ -101,13 +101,13 @@ public void testConnectionWithCertReload() throws Exception { ManagedChannel channel = null; try { // create server - serverFactory = new PemFileBasedKeyStoresFactory(secConf, caClient); + serverFactory = new PemFileBasedKeyStoresFactory(caClient); serverFactory.init(KeyStoresFactory.Mode.SERVER, true); server = setupServer(serverFactory); server.start(); // create client - clientFactory = new PemFileBasedKeyStoresFactory(secConf, caClient); + clientFactory = new 
PemFileBasedKeyStoresFactory(caClient); clientFactory.init(KeyStoresFactory.Mode.CLIENT, true); channel = setupClient(clientFactory, server.getPort()); XceiverClientProtocolServiceStub asyncStub = diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java index efd61be4248..d653c6af793 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java @@ -31,12 +31,13 @@ import org.apache.hadoop.hdds.security.symmetric.SecretKeyTestUtil; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.ByteArrayInputStream; import java.io.DataInputStream; +import java.io.File; import java.security.NoSuchAlgorithmException; import java.util.EnumSet; import java.util.UUID; @@ -52,7 +53,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -61,8 +62,8 @@ */ public class TestOzoneBlockTokenSecretManager { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneBlockTokenSecretManager.class.getSimpleName()); + @TempDir + private File baseDir; private static final String ALGORITHM = "HmacSHA256"; private OzoneBlockTokenSecretManager secretManager; @@ -77,7 +78,7 @@ public void setUp() throws Exception { pipeline = MockPipeline.createPipeline(3); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, BASEDIR); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, baseDir.getPath()); conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); SecurityConfig securityConfig = new SecurityConfig(conf); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java index ff0081dc8ca..6e76f4c12ee 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,7 +40,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static 
org.mockito.Mockito.verify; @@ -174,7 +174,7 @@ public void rejectsInvalidSignature() throws Exception { .contains("Invalid token for user"); } - @NotNull + @Nonnull private SecretKeyVerifierClient mockSecretKeyClient(boolean validSignature) throws IOException { SecretKeyVerifierClient secretKeyClient = diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java index f4f76c671fd..f98ad41e8db 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java @@ -78,7 +78,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Tests the Default CA Server. @@ -124,15 +123,11 @@ public void testMissingCertificate() { ((DefaultCAServer) testCA).processVerificationStatus( DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE, CAType.ROOT); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertThat(e.toString()).contains("Missing Root Certs"); - } + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> caInitializer.accept(securityConfig)); + // This also is a runtime exception. Hence not caught by junit expected + // exception. + assertThat(e.toString()).contains("Missing Root Certs"); } @Test @@ -145,15 +140,11 @@ public void testMissingKey() { Consumer caInitializer = ((DefaultCAServer) testCA).processVerificationStatus( DefaultCAServer.VerificationStatus.MISSING_KEYS, CAType.ROOT); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertThat(e.toString()).contains("Missing Keys"); - } + IllegalStateException e = + assertThrows(IllegalStateException.class, () -> caInitializer.accept(securityConfig)); + // This also is a runtime exception. Hence not caught by junit expected + // exception. 
+ assertThat(e.toString()).contains("Missing Keys"); } /** @@ -457,7 +448,7 @@ public void testInitWithCertChain(@TempDir Path tempDir) throws Exception { } @Test - public void testIntermediaryCA() throws Exception { + void testIntermediaryCA() throws Exception { conf.set(HddsConfigKeys.HDDS_X509_MAX_DURATION, "P3650D"); securityConfig = new SecurityConfig(conf); @@ -527,11 +518,8 @@ clusterId, scmId, caStore, new DefaultProfile(), clusterId, scmId, caStore, new DefaultProfile(), scmCertificateClient.getComponentName()); - try { - scmCA.init(securityConfig, CAType.SUBORDINATE); - } catch (Exception e) { - fail("testIntermediaryCA failed during init"); - } + + scmCA.init(securityConfig, CAType.SUBORDINATE); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java index 66d6ef893d4..c322aaf2a7a 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.ssl.KeyStoresFactory; import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover; import org.apache.hadoop.hdds.security.x509.certificate.authority.profile.DefaultProfile; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; @@ -59,7 +58,6 @@ import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION_DEFAULT; @@ -155,9 +153,9 @@ public CertificateClientTestImpl(OzoneConfiguration conf, boolean autoRenew) notificationReceivers = new HashSet<>(); serverKeyStoresFactory = SecurityUtil.getServerKeyStoresFactory( - securityConfig, this, true); + this, true); clientKeyStoresFactory = SecurityUtil.getClientKeyStoresFactory( - securityConfig, this, true); + this, true); if (autoRenew) { Duration gracePeriod = securityConfig.getRenewalGracePeriod(); @@ -221,11 +219,6 @@ public X509Certificate getCACertificate() { return rootCert; } - @Override - public byte[] signData(byte[] data) throws CertificateException { - return new byte[0]; - } - @Override public boolean verifySignature(byte[] data, byte[] signature, X509Certificate cert) throws CertificateException { @@ -248,17 +241,6 @@ public CertificateSignRequest.Builder getCSRBuilder() { return new CertificateSignRequest.Builder(); } - @Override - public String signAndStoreCertificate(PKCS10CertificationRequest request) - throws CertificateException { - return null; - } - - @Override - public void storeCertificate(String cert, CAType caType) - throws CertificateException { - } - @Override public void initWithRecovery() throws IOException { } @@ -288,11 +270,6 @@ public List getCAList() { return null; } - @Override - public List listCA() throws IOException { - return null; - } 
- @Override public List updateCAList() throws IOException { return null; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java index 29fca3cd677..b5d0425becf 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java @@ -45,7 +45,6 @@ import java.security.cert.X509Certificate; import java.time.Duration; import java.util.Arrays; -import java.util.UUID; import java.util.function.Predicate; import org.apache.commons.io.FileUtils; @@ -74,8 +73,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -88,6 +87,7 @@ public class TestDefaultCertificateClient { private X509Certificate x509Certificate; private DNCertificateClient dnCertClient; private HDDSKeyGenerator keyGenerator; + @TempDir private Path dnMetaDirPath; private SecurityConfig dnSecurityConfig; private SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient; @@ -99,10 +99,7 @@ public void setUp() throws Exception { OzoneConfiguration config = new OzoneConfiguration(); config.setStrings(OZONE_SCM_NAMES, "localhost"); config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2); - final String dnPath = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - dnMetaDirPath = Paths.get(dnPath, "test"); config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString()); dnSecurityConfig = new SecurityConfig(config); @@ -130,7 +127,6 @@ private void getCertClient() throws IOException { public void tearDown() throws IOException { dnCertClient.close(); dnCertClient = null; - FileUtils.deleteQuietly(dnMetaDirPath.toFile()); } /** diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java index 987e841e51a..3c3330a2b2d 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java @@ -28,10 +28,10 @@ import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; import org.bouncycastle.cert.X509CertificateHolder; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -42,7 +42,6 @@ import java.nio.file.Paths; import java.security.KeyPair; import 
java.security.cert.X509Certificate; -import java.util.UUID; import java.util.stream.Stream; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; @@ -63,6 +62,7 @@ public class TestDnCertificateClientInit { private String certSerialId = "3284792342234"; private DNCertificateClient dnCertificateClient; private HDDSKeyGenerator keyGenerator; + @TempDir private Path metaDirPath; private SecurityConfig securityConfig; private KeyCodec dnKeyCodec; @@ -85,9 +85,6 @@ private static Stream parameters() { @BeforeEach public void setUp() throws Exception { OzoneConfiguration config = new OzoneConfiguration(); - final String path = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - metaDirPath = Paths.get(path, "test"); config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString()); securityConfig = new SecurityConfig(config); keyGenerator = new HDDSKeyGenerator(securityConfig); @@ -107,7 +104,6 @@ public void setUp() throws Exception { public void tearDown() throws IOException { dnCertificateClient.close(); dnCertificateClient = null; - FileUtils.deleteQuietly(metaDirPath.toFile()); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java index 82974f2aa21..1d32712fc28 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateSignRequest.java @@ -54,6 +54,7 @@ import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest.getPkcs9Extensions; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -206,29 +207,23 @@ public void testGenerateCSRWithInvalidParams() throws NoSuchProviderException, } // Now try with blank/null Subject. 
- try { + assertThrows(IllegalArgumentException.class, () -> { builder.setSubject(null); builder.build(); - fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } + }); + builder.setSubject(subject); - try { + assertThrows(IllegalArgumentException.class, () -> { builder.setSubject(""); builder.build(); - fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } + }); + builder.setSubject(subject); // Now try with invalid IP address - try { + assertThrows(IllegalArgumentException.class, () -> { builder.addIpAddress("255.255.255.*"); builder.build(); - fail("Invalid ip address"); - } catch (IllegalArgumentException e) { - } + }); PKCS10CertificationRequest csr = builder.build(); @@ -290,7 +285,7 @@ private void verifyServiceId(Extensions extensions) { assertEquals("2.16.840.1.113730.3.1.34", oid); } if (o instanceof DERTaggedObject) { - String serviceName = ((DERTaggedObject)o).getObject().toString(); + String serviceName = ((DERTaggedObject)o).toASN1Primitive().toString(); assertEquals("OzoneMarketingCluster003", serviceName); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCertificate.java index c8c0d64da0a..bba36fad819 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCertificate.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCertificate.java @@ -47,6 +47,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -199,45 +200,35 @@ public void testInvalidParamFails() throws Exception { } // Now try with Blank Subject. - try { + assertThrows(IllegalArgumentException.class, () -> { builder.setSubject(""); builder.build(); - fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } + }); + builder.setSubject(subject); // Now try with blank/null SCM ID - try { + assertThrows(IllegalArgumentException.class, () -> { builder.setScmID(null); builder.build(); - fail("Null/Blank SCM ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setScmID(scmID); - } - + }); + builder.setScmID(scmID); // Now try with blank/null SCM ID - try { + assertThrows(IllegalArgumentException.class, () -> { builder.setClusterID(null); builder.build(); - fail("Null/Blank Cluster ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setClusterID(clusterID); - } - + }); + builder.setClusterID(clusterID); // Swap the Begin and End Date and verify that we cannot create a // certificate like that. 
- try { + assertThrows(IllegalArgumentException.class, () -> { builder.setBeginDate(notAfter); builder.setEndDate(notBefore); builder.build(); - fail("Illegal dates should have thrown."); - } catch (IllegalArgumentException e) { - builder.setBeginDate(notBefore); - builder.setEndDate(notAfter); - } + }); + builder.setBeginDate(notBefore); + builder.setEndDate(notAfter); try { KeyPair newKey = keyGen.generateKey(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java index cec6b7dd129..9628052b05d 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java @@ -21,6 +21,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.io.File; import java.security.KeyPair; import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; @@ -29,20 +31,22 @@ import java.security.spec.PKCS8EncodedKeySpec; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * Test class for HDDS Key Generator. */ public class TestHDDSKeyGenerator { private SecurityConfig config; + @TempDir + private File tempPath; @BeforeEach public void init() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, GenericTestUtils.getTempPath("testpath")); + conf.set(OZONE_METADATA_DIRS, tempPath.getPath()); config = new SecurityConfig(conf); } /** diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java index 257c543d22c..23a1f757309 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdds.server.http; +import org.apache.commons.lang3.RandomUtils; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerIdleThreadCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerMaxThreadCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadQueueWaitingTaskCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.SERVER_NAME; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -36,8 +37,6 @@ import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Random; - /** * Testing HttpServer2Metrics. */ @@ -57,11 +56,10 @@ public void setup() { @Test public void testMetrics() { // crate mock metrics - Random random = new Random(); - int threadCount = random.nextInt(); - int maxThreadCount = random.nextInt(); - int idleThreadCount = random.nextInt(); - int threadQueueWaitingTaskCount = random.nextInt(); + int threadCount = RandomUtils.nextInt(); + int maxThreadCount = RandomUtils.nextInt(); + int idleThreadCount = RandomUtils.nextInt(); + int threadQueueWaitingTaskCount = RandomUtils.nextInt(); String name = "s3g"; when(threadPool.getThreads()).thenReturn(threadCount); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java index baf39bd257d..9edbe4b3fc8 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java @@ -81,8 +81,8 @@ public class TestRDBSnapshotProvider { private Set configSet; private RDBSnapshotProvider rdbSnapshotProvider; private File testDir; - private final int numUsedCF = 3; - private final String leaderId = "leaderNode-1"; + private static final int NUM_USED_CF = 3; + private static final String LEADER_ID = "leaderNode-1"; private final AtomicReference latestCK = new AtomicReference<>(null); @@ -109,7 +109,7 @@ public void close() { public void downloadSnapshot(String leaderNodeID, File targetFile) throws IOException { for (int i = 0; i < 10; i++) { - insertDataToDB(numUsedCF); + insertDataToDB(NUM_USED_CF); } DBCheckpoint dbCheckpoint = rdbStore.getCheckpoint(true); latestCK.set(dbCheckpoint); @@ -151,30 +151,30 @@ public void testDownloadDBSnapshotFromLeader() throws Exception { assertEquals(0, before); // Get first snapshot - checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId); + checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID); File checkpointDir = checkpoint.getCheckpointLocation().toFile(); assertEquals(candidateDir, checkpointDir); int first = HAUtils.getExistingSstFiles( rdbSnapshotProvider.getCandidateDir()).size(); // Get second snapshot - checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId); + checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID); int second = HAUtils.getExistingSstFiles( rdbSnapshotProvider.getCandidateDir()).size(); assertThat(second).withFailMessage("The second snapshot should have more SST files") .isGreaterThan(first); DBCheckpoint latestCheckpoint = latestCK.get(); compareDB(latestCheckpoint.getCheckpointLocation().toFile(), - checkpoint.getCheckpointLocation().toFile(), numUsedCF); + checkpoint.getCheckpointLocation().toFile(), NUM_USED_CF); // Get third snapshot - checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId); + checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID); int third = HAUtils.getExistingSstFiles( rdbSnapshotProvider.getCandidateDir()).size(); assertThat(third).withFailMessage("The third snapshot should have more SST files") .isGreaterThan(second); compareDB(latestCK.get().getCheckpointLocation().toFile(), - checkpoint.getCheckpointLocation().toFile(), numUsedCF); + checkpoint.getCheckpointLocation().toFile(), NUM_USED_CF); // Test cleanup candidateDB rdbSnapshotProvider.init(); 
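The test changes in this patch repeatedly replace GenericTestUtils.getTempPath(...) plus manual deleteQuietly cleanup with JUnit 5's @TempDir injection. For readers who have not used the annotation, a minimal sketch of the pattern follows; the class name and assertions are illustrative only, while the annotation usage mirrors the migrated tests above.

import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import static org.junit.jupiter.api.Assertions.assertTrue;

// JUnit Jupiter creates each annotated directory before the test and removes it
// afterwards, so no explicit FileUtils.deleteQuietly(...) is needed in tearDown().
class TempDirPatternTest {

  @TempDir
  private File baseDir;   // field injection, as in the migrated tests above

  @Test
  void temporaryDirectoriesAreManagedByJUnit(@TempDir Path perTestDir) {
    // Parameter injection provides an additional per-method directory.
    assertTrue(baseDir.isDirectory());
    assertTrue(perTestDir.toFile().isDirectory());
  }
}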
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java index 7724835957f..56a16422d05 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java @@ -65,7 +65,7 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options, Set families, long maxDbUpdatesSizeThreshold) throws IOException { - return new RDBStore(dbFile, options, new ManagedWriteOptions(), families, + return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families, CodecRegistry.newBuilder().build(), false, 1000, null, false, maxDbUpdatesSizeThreshold, true, null, ""); } @@ -248,7 +248,7 @@ public void listTables() throws Exception { int count = families.size(); // Assert that we have all the tables in the list and no more. for (String name : families) { - assertTrue(hashTable.containsKey(name)); + assertThat(hashTable).containsKey(name); count--; } assertEquals(0, count); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java index 525ff27c545..9a065d9b1c1 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java @@ -38,9 +38,8 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentCaptor.forClass; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; @@ -296,13 +295,9 @@ public void testNormalPrefixedIterator() throws IOException { assertTrue(iter.hasNext()); verify(rocksDBIteratorMock, times(1)).isValid(); verify(rocksDBIteratorMock, times(1)).key(); - - try { - iter.seekToLast(); - fail("Prefixed iterator does not support seekToLast"); - } catch (Exception e) { - assertInstanceOf(UnsupportedOperationException.class, e); - } + Exception e = + assertThrows(Exception.class, () -> iter.seekToLast(), "Prefixed iterator does not support seekToLast"); + assertInstanceOf(UnsupportedOperationException.class, e); iter.close(); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java index 2b3b15b47b8..fb10b1b06db 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java @@ -41,8 +41,7 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static 
org.mockito.Mockito.any; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; @@ -358,12 +357,9 @@ public void testNormalPrefixedIterator() throws Exception { verify(rocksIteratorMock, times(1)).isValid(); verify(rocksIteratorMock, times(1)).key(any()); - try { - i.seekToLast(); - fail("Prefixed iterator does not support seekToLast"); - } catch (Exception e) { - assertInstanceOf(UnsupportedOperationException.class, e); - } + Exception e = + assertThrows(Exception.class, () -> i.seekToLast(), "Prefixed iterator does not support seekToLast"); + assertInstanceOf(UnsupportedOperationException.class, e); } CodecTestUtil.gc(); diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 49e71d2fe69..6adca817ed1 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -78,9 +78,11 @@ message ScmContainerLocationRequest { optional GetContainerReplicasRequestProto getContainerReplicasRequest = 39; optional ReplicationManagerReportRequestProto replicationManagerReportRequest = 40; optional ResetDeletedBlockRetryCountRequestProto resetDeletedBlockRetryCountRequest = 41; - optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42; + optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42; optional GetFailedDeletedBlocksTxnRequestProto getFailedDeletedBlocksTxnRequest = 43; optional DecommissionScmRequestProto decommissionScmRequest = 44; + optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; + optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46; } message ScmContainerLocationResponse { @@ -130,9 +132,11 @@ message ScmContainerLocationResponse { optional GetContainerReplicasResponseProto getContainerReplicasResponse = 39; optional ReplicationManagerReportResponseProto getReplicationManagerReportResponse = 40; optional ResetDeletedBlockRetryCountResponseProto resetDeletedBlockRetryCountResponse = 41; - optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42; + optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42; optional GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxnResponse = 43; optional DecommissionScmResponseProto decommissionScmResponse = 44; + optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; + optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46; enum Status { OK = 1; @@ -184,6 +188,8 @@ enum Type { TransferLeadership = 38; GetFailedDeletedBlocksTransaction = 39; DecommissionScm = 40; + SingleNodeQuery = 41; + GetContainersOnDecomNode = 42; } /** @@ -326,6 +332,14 @@ message NodeQueryResponseProto { repeated Node datanodes = 1; } +message SingleNodeQueryRequestProto { + required UUID uuid = 1; +} + +message SingleNodeQueryResponseProto { + optional Node datanode = 1; +} + /* Datanode usage info request message. 
*/ @@ -591,6 +605,19 @@ message DecommissionScmResponseProto { optional string errorMsg = 2; } +message GetContainersOnDecomNodeRequestProto { + required DatanodeDetailsProto datanodeDetails = 1; +} + +message ContainersOnDecomNodeProto { + required string name = 1; + repeated ContainerID id = 2; +} + +message GetContainersOnDecomNodeResponseProto { + repeated ContainersOnDecomNodeProto containersOnDecomNode = 1; +} + /** * Protocol used from an HDFS node to StorageContainerManager. See the request * and response messages for details of the RPC calls. diff --git a/hadoop-hdds/interface-admin/src/main/resources/proto.lock b/hadoop-hdds/interface-admin/src/main/resources/proto.lock index d834dd4dffe..8e898afc5c1 100644 --- a/hadoop-hdds/interface-admin/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-admin/src/main/resources/proto.lock @@ -171,6 +171,22 @@ { "name": "ResetDeletedBlockRetryCount", "integer": 36 + }, + { + "name": "GetClosedContainerCount", + "integer": 37 + }, + { + "name": "TransferLeadership", + "integer": 38 + }, + { + "name": "GetFailedDeletedBlocksTransaction", + "integer": 39 + }, + { + "name": "DecommissionScm", + "integer": 40 } ] }, @@ -195,6 +211,23 @@ } ] }, + { + "name": "SCMCloseContainerResponseProto.Status", + "enum_fields": [ + { + "name": "OK", + "integer": 1 + }, + { + "name": "CONTAINER_ALREADY_CLOSED", + "integer": 2 + }, + { + "name": "CONTAINER_ALREADY_CLOSING", + "integer": 3 + } + ] + }, { "name": "PipelineResponseProto.Error", "enum_fields": [ @@ -446,6 +479,24 @@ "name": "resetDeletedBlockRetryCountRequest", "type": "ResetDeletedBlockRetryCountRequestProto", "optional": true + }, + { + "id": 42, + "name": "transferScmLeadershipRequest", + "type": "TransferLeadershipRequestProto", + "optional": true + }, + { + "id": 43, + "name": "getFailedDeletedBlocksTxnRequest", + "type": "GetFailedDeletedBlocksTxnRequestProto", + "optional": true + }, + { + "id": 44, + "name": "decommissionScmRequest", + "type": "DecommissionScmRequestProto", + "optional": true } ] }, @@ -703,6 +754,24 @@ "name": "resetDeletedBlockRetryCountResponse", "type": "ResetDeletedBlockRetryCountResponseProto", "optional": true + }, + { + "id": 42, + "name": "transferScmLeadershipResponse", + "type": "TransferLeadershipResponseProto", + "optional": true + }, + { + "id": 43, + "name": "getFailedDeletedBlocksTxnResponse", + "type": "GetFailedDeletedBlocksTxnResponseProto", + "optional": true + }, + { + "id": 44, + "name": "decommissionScmResponse", + "type": "DecommissionScmResponseProto", + "optional": true } ] }, @@ -1031,7 +1100,15 @@ ] }, { - "name": "SCMCloseContainerResponseProto" + "name": "SCMCloseContainerResponseProto", + "fields": [ + { + "id": 1, + "name": "status", + "type": "Status", + "optional": true + } + ] }, { "name": "NodeQueryRequestProto", @@ -1506,6 +1583,40 @@ } ] }, + { + "name": "GetFailedDeletedBlocksTxnRequestProto", + "fields": [ + { + "id": 1, + "name": "traceID", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "count", + "type": "int32", + "required": true + }, + { + "id": 3, + "name": "startTxId", + "type": "int64", + "optional": true + } + ] + }, + { + "name": "GetFailedDeletedBlocksTxnResponseProto", + "fields": [ + { + "id": 1, + "name": "deletedBlocksTransactions", + "type": "DeletedBlocksTransactionInfo", + "is_repeated": true + } + ] + }, { "name": "ResetDeletedBlockRetryCountRequestProto", "fields": [ @@ -1615,7 +1726,19 @@ "id": 4, "name": "certSerialId", "type": "string", - "required": true + "optional": true, + 
"options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 5, + "name": "secretKeyId", + "type": "UUID", + "optional": true } ] }, @@ -1764,6 +1887,34 @@ "required": true } ] + }, + { + "name": "DecommissionScmRequestProto", + "fields": [ + { + "id": 1, + "name": "scmId", + "type": "string", + "required": true + } + ] + }, + { + "name": "DecommissionScmResponseProto", + "fields": [ + { + "id": 1, + "name": "success", + "type": "bool", + "required": true + }, + { + "id": 2, + "name": "errorMsg", + "type": "string", + "optional": true + } + ] } ], "services": [ diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock index ee6d2251d3e..1f3f552a4d1 100644 --- a/hadoop-hdds/interface-client/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock @@ -78,6 +78,14 @@ { "name": "GetCommittedBlockLength", "integer": 18 + }, + { + "name": "StreamInit", + "integer": 19 + }, + { + "name": "StreamWrite", + "integer": 20 } ] }, @@ -348,6 +356,31 @@ "integer": 1 } ] + }, + { + "name": "CopyContainerCompressProto", + "enum_fields": [ + { + "name": "NO_COMPRESSION", + "integer": 1 + }, + { + "name": "GZIP", + "integer": 2 + }, + { + "name": "LZ4", + "integer": 3 + }, + { + "name": "SNAPPY", + "integer": 4 + }, + { + "name": "ZSTD", + "integer": 5 + } + ] } ], "messages": [ @@ -1100,6 +1133,12 @@ "name": "checksumData", "type": "ChecksumData", "required": true + }, + { + "id": 6, + "name": "stripeChecksum", + "type": "bytes", + "optional": true } ] }, @@ -1150,7 +1189,7 @@ "id": 2, "name": "chunkData", "type": "ChunkInfo", - "required": true + "optional": true }, { "id": 3, @@ -1204,12 +1243,14 @@ { "id": 3, "name": "data", - "type": "bytes" + "type": "bytes", + "oneof_parent": "responseData" }, { "id": 4, "name": "dataBuffers", - "type": "DataBuffers" + "type": "DataBuffers", + "oneof_parent": "responseData" } ] }, @@ -1366,6 +1407,12 @@ "name": "version", "type": "uint32", "optional": true + }, + { + "id": 5, + "name": "compression", + "type": "CopyContainerCompressProto", + "optional": true } ] }, @@ -1409,6 +1456,44 @@ "optional": true } ] + }, + { + "name": "SendContainerRequest", + "fields": [ + { + "id": 1, + "name": "containerID", + "type": "int64", + "required": true + }, + { + "id": 2, + "name": "offset", + "type": "uint64", + "required": true + }, + { + "id": 3, + "name": "data", + "type": "bytes", + "required": true + }, + { + "id": 4, + "name": "checksum", + "type": "int64", + "optional": true + }, + { + "id": 5, + "name": "compression", + "type": "CopyContainerCompressProto", + "optional": true + } + ] + }, + { + "name": "SendContainerResponse" } ], "services": [ @@ -1432,6 +1517,12 @@ "in_type": "CopyContainerRequestProto", "out_type": "CopyContainerResponseProto", "out_streamed": true + }, + { + "name": "upload", + "in_type": "SendContainerRequest", + "out_type": "SendContainerResponse", + "in_streamed": true } ] } @@ -1455,6 +1546,150 @@ ] } }, + { + "protopath": "ReconfigureProtocol.proto", + "def": { + "messages": [ + { + "name": "GetServerNameRequestProto" + }, + { + "name": "GetServerNameResponseProto", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string", + "required": true + } + ] + }, + { + "name": "StartReconfigureRequestProto" + }, + { + "name": "StartReconfigureResponseProto" + }, + { + "name": "GetReconfigureStatusRequestProto" + }, + { + "name": "GetConfigurationChangeProto", + "fields": [ + { + "id": 1, + "name": "name", + 
"type": "string", + "required": true + }, + { + "id": 2, + "name": "oldValue", + "type": "string", + "required": true + }, + { + "id": 3, + "name": "newValue", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "errorMessage", + "type": "string", + "optional": true + } + ] + }, + { + "name": "GetReconfigureStatusResponseProto", + "fields": [ + { + "id": 1, + "name": "startTime", + "type": "int64", + "required": true + }, + { + "id": 2, + "name": "endTime", + "type": "int64", + "optional": true + }, + { + "id": 3, + "name": "changes", + "type": "GetConfigurationChangeProto", + "is_repeated": true + } + ] + }, + { + "name": "ListReconfigurePropertiesRequestProto" + }, + { + "name": "ListReconfigurePropertiesResponseProto", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string", + "is_repeated": true + } + ] + } + ], + "services": [ + { + "name": "ReconfigureProtocolService", + "rpcs": [ + { + "name": "getServerName", + "in_type": "GetServerNameRequestProto", + "out_type": "GetServerNameResponseProto" + }, + { + "name": "getReconfigureStatus", + "in_type": "GetReconfigureStatusRequestProto", + "out_type": "GetReconfigureStatusResponseProto" + }, + { + "name": "startReconfigure", + "in_type": "StartReconfigureRequestProto", + "out_type": "StartReconfigureResponseProto" + }, + { + "name": "listReconfigureProperties", + "in_type": "ListReconfigurePropertiesRequestProto", + "out_type": "ListReconfigurePropertiesResponseProto" + } + ] + } + ], + "package": { + "name": "hadoop.hdds" + }, + "options": [ + { + "name": "java_package", + "value": "org.apache.hadoop.hdds.protocol.proto" + }, + { + "name": "java_outer_classname", + "value": "ReconfigureProtocolProtos" + }, + { + "name": "java_generic_services", + "value": "true" + }, + { + "name": "java_generate_equals_and_hash", + "value": "true" + } + ] + } + }, { "protopath": "hdds.proto", "def": { @@ -2192,6 +2427,12 @@ "name": "node", "type": "DatanodeDetailsProto", "optional": true + }, + { + "id": 5, + "name": "containerCount", + "type": "int64", + "optional": true } ] }, @@ -2363,6 +2604,46 @@ } ] }, + { + "name": "RemoveScmRequestProto", + "fields": [ + { + "id": 1, + "name": "clusterId", + "type": "string", + "required": true + }, + { + "id": 2, + "name": "scmId", + "type": "string", + "required": true + }, + { + "id": 3, + "name": "ratisAddr", + "type": "string", + "required": true + } + ] + }, + { + "name": "RemoveScmResponseProto", + "fields": [ + { + "id": 1, + "name": "success", + "type": "bool", + "required": true + }, + { + "id": 2, + "name": "scmId", + "type": "string", + "optional": true + } + ] + }, { "name": "ECReplicationConfig", "fields": [ @@ -2538,7 +2819,13 @@ "id": 4, "name": "omCertSerialId", "type": "string", - "required": true + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] }, { "id": 5, @@ -2551,6 +2838,12 @@ "name": "maxLength", "type": "uint64", "required": true + }, + { + "id": 7, + "name": "secretKeyId", + "type": "UUID", + "optional": true } ] }, @@ -2671,6 +2964,12 @@ "name": "bcsId", "type": "int64", "required": true + }, + { + "id": 5, + "name": "state", + "type": "string", + "optional": true } ] }, @@ -2876,6 +3175,119 @@ "name": "nextIterationIndex", "type": "int32", "optional": true + }, + { + "id": 20, + "name": "moveReplicationTimeout", + "type": "int64", + "optional": true + } + ] + }, + { + "name": "TransferLeadershipRequestProto", + "fields": [ + { + "id": 1, + "name": "newLeaderId", + "type": "string", + "required": true + } + ] + }, + { 
+ "name": "TransferLeadershipResponseProto" + }, + { + "name": "DeletedBlocksTransactionInfo", + "fields": [ + { + "id": 1, + "name": "txID", + "type": "int64", + "optional": true + }, + { + "id": 2, + "name": "containerID", + "type": "int64", + "optional": true + }, + { + "id": 3, + "name": "localID", + "type": "int64", + "is_repeated": true + }, + { + "id": 4, + "name": "count", + "type": "int32", + "optional": true + } + ] + }, + { + "name": "CompactionFileInfoProto", + "fields": [ + { + "id": 1, + "name": "fileName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "startKey", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "endKey", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "columnFamily", + "type": "string", + "optional": true + } + ] + }, + { + "name": "CompactionLogEntryProto", + "fields": [ + { + "id": 1, + "name": "dbSequenceNumber", + "type": "uint64", + "optional": true + }, + { + "id": 2, + "name": "compactionTime", + "type": "uint64", + "optional": true + }, + { + "id": 3, + "name": "inputFileIntoList", + "type": "CompactionFileInfoProto", + "is_repeated": true + }, + { + "id": 4, + "name": "outputFileIntoList", + "type": "CompactionFileInfoProto", + "is_repeated": true + }, + { + "id": 5, + "name": "compactionReason", + "type": "string", + "optional": true } ] } diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock index 31659ed1094..6966915f4a2 100644 --- a/hadoop-hdds/interface-server/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock @@ -127,6 +127,14 @@ { "name": "FINALIZE", "integer": 8 + }, + { + "name": "SECRET_KEY", + "integer": 9 + }, + { + "name": "CERT_ROTATE", + "integer": 10 } ] } @@ -460,6 +468,296 @@ ] } }, + { + "protopath": "ScmSecretKeyProtocol.proto", + "def": { + "enums": [ + { + "name": "Type", + "enum_fields": [ + { + "name": "GetCurrentSecretKey", + "integer": 1 + }, + { + "name": "GetSecretKey", + "integer": 2 + }, + { + "name": "GetAllSecretKeys", + "integer": 3 + }, + { + "name": "CheckAndRotate", + "integer": 4 + } + ] + }, + { + "name": "Status", + "enum_fields": [ + { + "name": "OK", + "integer": 1 + }, + { + "name": "INTERNAL_ERROR", + "integer": 2 + }, + { + "name": "SECRET_KEY_NOT_ENABLED", + "integer": 3 + }, + { + "name": "SECRET_KEY_NOT_INITIALIZED", + "integer": 4 + } + ] + } + ], + "messages": [ + { + "name": "SCMSecretKeyRequest", + "fields": [ + { + "id": 1, + "name": "cmdType", + "type": "Type", + "required": true + }, + { + "id": 2, + "name": "traceID", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "getSecretKeyRequest", + "type": "SCMGetSecretKeyRequest", + "optional": true + }, + { + "id": 4, + "name": "checkAndRotateRequest", + "type": "SCMGetCheckAndRotateRequest", + "optional": true + } + ] + }, + { + "name": "SCMSecretKeyResponse", + "fields": [ + { + "id": 1, + "name": "cmdType", + "type": "Type", + "required": true + }, + { + "id": 2, + "name": "traceID", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "success", + "type": "bool", + "optional": true, + "options": [ + { + "name": "default", + "value": "true" + } + ] + }, + { + "id": 4, + "name": "message", + "type": "string", + "optional": true + }, + { + "id": 5, + "name": "status", + "type": "Status", + "required": true + }, + { + "id": 11, + "name": "currentSecretKeyResponseProto", + "type": "SCMGetCurrentSecretKeyResponse", + "optional": true 
+ }, + { + "id": 12, + "name": "getSecretKeyResponseProto", + "type": "SCMGetSecretKeyResponse", + "optional": true + }, + { + "id": 13, + "name": "secretKeysListResponseProto", + "type": "SCMSecretKeysListResponse", + "optional": true + }, + { + "id": 14, + "name": "checkAndRotateResponseProto", + "type": "SCMGetCheckAndRotateResponse", + "optional": true + } + ] + }, + { + "name": "ManagedSecretKey", + "fields": [ + { + "id": 1, + "name": "id", + "type": "UUID", + "required": true + }, + { + "id": 2, + "name": "creationTime", + "type": "uint64", + "required": true + }, + { + "id": 3, + "name": "expiryTime", + "type": "uint64", + "required": true + }, + { + "id": 4, + "name": "algorithm", + "type": "string", + "required": true + }, + { + "id": 5, + "name": "encoded", + "type": "bytes", + "required": true + } + ] + }, + { + "name": "SCMGetSecretKeyRequest", + "fields": [ + { + "id": 1, + "name": "secretKeyId", + "type": "UUID", + "required": true + } + ] + }, + { + "name": "SCMGetCheckAndRotateRequest", + "fields": [ + { + "id": 1, + "name": "force", + "type": "bool", + "optional": true, + "options": [ + { + "name": "default", + "value": "false" + } + ] + } + ] + }, + { + "name": "SCMGetCurrentSecretKeyResponse", + "fields": [ + { + "id": 1, + "name": "secretKey", + "type": "ManagedSecretKey", + "required": true + } + ] + }, + { + "name": "SCMGetSecretKeyResponse", + "fields": [ + { + "id": 1, + "name": "secretKey", + "type": "ManagedSecretKey", + "optional": true + } + ] + }, + { + "name": "SCMSecretKeysListResponse", + "fields": [ + { + "id": 1, + "name": "secretKeys", + "type": "ManagedSecretKey", + "is_repeated": true + } + ] + }, + { + "name": "SCMGetCheckAndRotateResponse", + "fields": [ + { + "id": 1, + "name": "status", + "type": "bool", + "optional": true + } + ] + } + ], + "services": [ + { + "name": "SCMSecretKeyProtocolService", + "rpcs": [ + { + "name": "submitRequest", + "in_type": "SCMSecretKeyRequest", + "out_type": "SCMSecretKeyResponse" + } + ] + } + ], + "imports": [ + { + "path": "hdds.proto" + } + ], + "package": { + "name": "hadoop.hdds.security.symmetric" + }, + "options": [ + { + "name": "java_package", + "value": "org.apache.hadoop.hdds.protocol.proto" + }, + { + "name": "java_outer_classname", + "value": "SCMSecretKeyProtocolProtos" + }, + { + "name": "java_generic_services", + "value": "true" + }, + { + "name": "java_generate_equals_and_hash", + "value": "true" + } + ] + } + }, { "protopath": "ScmServerDatanodeHeartbeatProtocol.proto", "def": { @@ -681,6 +979,19 @@ "integer": 11 } ] + }, + { + "name": "ReplicationCommandPriority", + "enum_fields": [ + { + "name": "NORMAL", + "integer": 1 + }, + { + "name": "LOW", + "integer": 2 + } + ] } ], "messages": [ @@ -999,6 +1310,12 @@ "name": "commands", "type": "SCMCommandProto", "is_repeated": true + }, + { + "id": 3, + "name": "term", + "type": "int64", + "optional": true } ] }, @@ -1292,6 +1609,18 @@ "name": "replicaIndex", "type": "int32", "optional": true + }, + { + "id": 15, + "name": "isEmpty", + "type": "bool", + "optional": true, + "options": [ + { + "name": "default", + "value": "false" + } + ] } ] }, @@ -1552,6 +1881,12 @@ "name": "encodedToken", "type": "string", "optional": true + }, + { + "id": 17, + "name": "deadlineMsSinceEpoch", + "type": "int64", + "optional": true } ] }, @@ -1742,6 +2077,24 @@ "name": "replicaIndex", "type": "int32", "optional": true + }, + { + "id": 5, + "name": "target", + "type": "DatanodeDetailsProto", + "optional": true + }, + { + "id": 6, + "name": "priority", + "type": 
"ReplicationCommandPriority", + "optional": true, + "options": [ + { + "name": "default", + "value": "NORMAL" + } + ] } ] }, @@ -2176,6 +2529,34 @@ { "name": "FAILED_TO_ADD_CRL_CLIENT", "integer": 39 + }, + { + "name": "INVALID_PIPELINE_STATE", + "integer": 40 + }, + { + "name": "DUPLICATED_PIPELINE_ID", + "integer": 41 + }, + { + "name": "TIMEOUT", + "integer": 42 + }, + { + "name": "CA_ROTATION_IN_PROGRESS", + "integer": 43 + }, + { + "name": "CA_ROTATION_IN_POST_PROGRESS", + "integer": 44 + }, + { + "name": "CONTAINER_ALREADY_CLOSED", + "integer": 45 + }, + { + "name": "CONTAINER_ALREADY_CLOSING", + "integer": 46 } ] }, @@ -2405,6 +2786,12 @@ "name": "ecReplicationConfig", "type": "hadoop.hdds.ECReplicationConfig", "optional": true + }, + { + "id": 9, + "name": "client", + "type": "string", + "optional": true } ] }, @@ -2632,6 +3019,14 @@ { "name": "GetCert", "integer": 12 + }, + { + "name": "GetAllRootCaCertificates", + "integer": 13 + }, + { + "name": "RemoveExpiredCertificates", + "integer": 14 } ] }, @@ -2864,6 +3259,18 @@ "name": "getCertRequest", "type": "SCMGetCertRequestProto", "optional": true + }, + { + "id": 14, + "name": "getAllRootCaCertificatesRequestProto", + "type": "SCMGetAllRootCaCertificatesRequestProto", + "optional": true + }, + { + "id": 15, + "name": "removeExpiredCertificatesRequestProto", + "type": "SCMRemoveExpiredCertificatesRequestProto", + "optional": true } ] }, @@ -2935,6 +3342,18 @@ "name": "revokeCertificatesResponseProto", "type": "SCMRevokeCertificatesResponseProto", "optional": true + }, + { + "id": 11, + "name": "allRootCaCertificatesResponseProto", + "type": "SCMGetAllRootCaCertificatesResponseProto", + "optional": true + }, + { + "id": 12, + "name": "removeExpiredCertificatesResponseProto", + "type": "SCMRemoveExpiredCertificatesResponseProto", + "optional": true } ] }, @@ -3003,6 +3422,18 @@ "name": "CSR", "type": "string", "required": true + }, + { + "id": 3, + "name": "renew", + "type": "bool", + "optional": true, + "options": [ + { + "name": "default", + "value": "false" + } + ] } ] }, @@ -3095,6 +3526,28 @@ } ] }, + { + "name": "SCMGetAllRootCaCertificatesResponseProto", + "fields": [ + { + "id": 1, + "name": "allX509RootCaCertificates", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "SCMRemoveExpiredCertificatesResponseProto", + "fields": [ + { + "id": 1, + "name": "removedExpiredCertificates", + "type": "string", + "is_repeated": true + } + ] + }, { "name": "SCMGetRootCACertificateRequestProto" }, @@ -3166,6 +3619,9 @@ } ] }, + { + "name": "SCMGetAllRootCaCertificatesRequestProto" + }, { "name": "SCMRevokeCertificatesResponseProto", "fields": [ @@ -3176,6 +3632,9 @@ "optional": true } ] + }, + { + "name": "SCMRemoveExpiredCertificatesRequestProto" } ], "services": [ diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java index 32d08f46f22..8246d10820b 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java @@ -31,7 +31,10 @@ public class ManagedBloomFilter extends BloomFilter { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git 
a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java index dc6a8409260..055d4be9d9a 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java @@ -79,8 +79,11 @@ public boolean isReused() { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } /** diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java index 44f4dba8f8f..6ac4a2fa5b6 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java @@ -31,7 +31,10 @@ public class ManagedCompactRangeOptions extends CompactRangeOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index dd8e20cd955..638739ff557 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -31,7 +31,10 @@ public class ManagedDBOptions extends DBOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java index d19ffbda4f1..388f5abea39 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java @@ -31,7 +31,10 @@ public class ManagedEnvOptions extends EnvOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java index 7a2049efda8..b151f836f96 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java @@ -31,7 +31,10 @@ public class ManagedFlushOptions extends FlushOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } 
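Editor's note on the Managed* wrapper hunks above and below: they all apply the same pattern, running the underlying close() inside try and closing the leak tracker in finally, so the tracker entry is released even when the native close throws. A minimal self-contained sketch of that idea, with a hypothetical counter standing in for the real leak tracker and native handle:

import java.util.concurrent.atomic.AtomicInteger;

public final class TrackedResource implements AutoCloseable {
  // Hypothetical stand-in for the leak-tracker handle acquired at construction time.
  private static final AtomicInteger OPEN = new AtomicInteger();
  private final Runnable leakTracker;

  public TrackedResource() {
    OPEN.incrementAndGet();
    this.leakTracker = OPEN::decrementAndGet;
  }

  private void closeNative() {
    // Imagine this delegates to a native handle and may fail.
    throw new IllegalStateException("native close failed");
  }

  @Override
  public void close() {
    try {
      closeNative();       // may throw, like super.close() on a RocksObject
    } finally {
      leakTracker.run();   // always release the tracker, mirroring the diff
    }
  }

  public static void main(String[] args) {
    TrackedResource r = new TrackedResource();
    try {
      r.close();
    } catch (IllegalStateException expected) {
      // the demo ignores the failure; the point is the finally block ran
    }
    System.out.println("open after close: " + OPEN.get()); // prints 0
  }
}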
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java index 36e8e36ef08..ec68f42e748 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java @@ -31,7 +31,10 @@ public class ManagedIngestExternalFileOptions extends IngestExternalFileOptions @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java index db8ff7ddbdd..8130361d79d 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java @@ -35,7 +35,10 @@ public ManagedLRUCache(long capacity) { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java index cae72ab7307..522ca1ac325 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java @@ -41,7 +41,10 @@ public T get() { @Override public void close() { - original.close(); - leakTracker.close(); + try { + original.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java index 73ee224a1ad..9cf0a46fd8b 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java @@ -31,7 +31,10 @@ public class ManagedOptions extends Options { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java index af5d3879e7b..39d41482751 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java @@ -31,7 +31,10 @@ public class ManagedReadOptions extends ReadOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git 
a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 8ae82a688bb..9c86a47d740 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.io.File; import java.io.IOException; import java.time.Duration; diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java index b69dc5d7044..cff320fec5e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java @@ -40,9 +40,12 @@ public synchronized long getNativeHandle() { @Override protected void disposeInternal() { - super.disposeInternal(); // RocksMutableObject.close is final thus can't be decorated. // So, we decorate disposeInternal instead to track closure. - leakTracker.close(); + try { + super.disposeInternal(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java index a80b7b69a1c..0c9f27dd5eb 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java @@ -38,7 +38,10 @@ public ManagedSstFileWriter(EnvOptions envOptions, @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java index ecd731dd6fa..8fc166bb612 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java @@ -31,7 +31,10 @@ public class ManagedStatistics extends Statistics { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java index 28aadf95f38..bda1af7d59b 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java @@ -38,7 +38,10 @@ public ManagedWriteBatch(byte[] data) { @Override public void close() { - super.close(); - 
leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java index d226b3e03ea..4ce8bc037bb 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java @@ -31,7 +31,10 @@ public class ManagedWriteOptions extends WriteOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index d3a015a18f9..c12ddbb091b 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -52,10 +52,11 @@ - false 8 8 + https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz + https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -134,7 +135,7 @@ wget - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz + ${zlib.url} zlib-${zlib.version}.tar.gz ${project.build.directory}/zlib @@ -146,7 +147,7 @@ wget - https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz + ${bzip2.url} bzip2-v${bzip2.version}.tar.gz ${project.build.directory}/bzip2 @@ -220,6 +221,7 @@ + diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index ed5873770e3..8fc4e83e7a1 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -33,12 +33,12 @@ import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.same; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.same; import static org.mockito.Mockito.mockStatic; /** diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java index d8fefeb9b75..d2796c19fc5 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java @@ -54,9 +54,9 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static 
org.junit.jupiter.api.Assumptions.assumeTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -234,7 +234,7 @@ protected KeyValue getTransformedValue( ManagedSSTDumpIterator.KeyValue r = iterator.next(); String key = new String(r.getKey(), StandardCharsets.UTF_8); Pair recordKey = Pair.of(key, r.getType()); - assertTrue(expectedKeys.containsKey(recordKey)); + assertThat(expectedKeys).containsKey(recordKey); assertEquals(Optional.ofNullable(expectedKeys .get(recordKey)).orElse(""), new String(r.getValue(), StandardCharsets.UTF_8)); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3d5967e9c0c..829c0d6ac36 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index e830106e570..97d015fb239 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -172,7 +172,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, private ColumnFamilyHandle snapshotInfoTableCFHandle; private final AtomicInteger tarballRequestCount; - private final String dagPruningServiceName = "CompactionDagPruningService"; + private static final String DAG_PRUNING_SERVICE_NAME = "CompactionDagPruningService"; private AtomicBoolean suspended; private ColumnFamilyHandle compactionLogTableCFHandle; @@ -230,7 +230,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, TimeUnit.MILLISECONDS); if (pruneCompactionDagDaemonRunIntervalInMs > 0) { - this.scheduler = new Scheduler(dagPruningServiceName, + this.scheduler = new Scheduler(DAG_PRUNING_SERVICE_NAME, true, 1); this.scheduler.scheduleWithFixedDelay( @@ -307,7 +307,7 @@ public void close() throws Exception { if (!closed) { closed = true; if (scheduler != null) { - LOG.info("Shutting down {}.", dagPruningServiceName); + LOG.info("Shutting down {}.", DAG_PRUNING_SERVICE_NAME); scheduler.close(); } } @@ -1421,16 +1421,21 @@ public String getCompactionLogDir() { * those are not needed to generate snapshot diff. These files are basically * non-leaf nodes of the DAG. */ - public synchronized void pruneSstFiles() { + public void pruneSstFiles() { if (!shouldRun()) { return; } Set nonLeafSstFiles; - nonLeafSstFiles = forwardCompactionDAG.nodes().stream() - .filter(node -> !forwardCompactionDAG.successors(node).isEmpty()) - .map(node -> node.getFileName()) - .collect(Collectors.toSet()); + // This is synchronized because compaction thread can update the compactionDAG and can be in situation + // when nodes are added to the graph, but arcs are still in progress. + // Hence, the lock is taken. 
+ synchronized (this) { + nonLeafSstFiles = forwardCompactionDAG.nodes().stream() + .filter(node -> !forwardCompactionDAG.successors(node).isEmpty()) + .map(node -> node.getFileName()) + .collect(Collectors.toSet()); + } if (CollectionUtils.isNotEmpty(nonLeafSstFiles)) { LOG.info("Removing SST files: {} as part of SST file pruning.", @@ -1448,8 +1453,13 @@ public void incrementTarballRequestCount() { tarballRequestCount.incrementAndGet(); } - public void decrementTarballRequestCount() { - tarballRequestCount.decrementAndGet(); + public void decrementTarballRequestCountAndNotify() { + // Synchronized block is used to ensure that lock is on the same instance notifyAll is being called. + synchronized (this) { + tarballRequestCount.decrementAndGet(); + // Notify compaction threads to continue. + notifyAll(); + } } public boolean shouldRun() { @@ -1517,8 +1527,7 @@ public static RocksDBCheckpointDiffer getInstance( * for cache. */ public static void invalidateCacheEntry(String cacheKey) { - IOUtils.closeQuietly(INSTANCE_MAP.get(cacheKey)); - INSTANCE_MAP.remove(cacheKey); + IOUtils.close(LOG, INSTANCE_MAP.remove(cacheKey)); } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index f70b85daebb..b01e4cc2e30 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -96,6 +96,7 @@ import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_READ_ALL_DB_KEYS; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -125,10 +126,10 @@ public class TestRocksDBCheckpointDiffer { private final List> colHandles = new ArrayList<>(); - private final String activeDbDirName = "./rocksdb-data"; - private final String metadataDirName = "./metadata"; - private final String compactionLogDirName = "compaction-log"; - private final String sstBackUpDirName = "compaction-sst-backup"; + private static final String ACTIVE_DB_DIR_NAME = "./rocksdb-data"; + private static final String METADATA_DIR_NAME = "./metadata"; + private static final String COMPACTION_LOG_DIR_NAME = "compaction-log"; + private static final String SST_BACK_UP_DIR_NAME = "compaction-sst-backup"; private File activeDbDir; private File metadataDirDir; private File compactionLogDir; @@ -149,17 +150,17 @@ public void init() throws RocksDBException { // Test class log level. 
Set to DEBUG for verbose output GenericTestUtils.setLogLevel(TestRocksDBCheckpointDiffer.LOG, Level.INFO); - activeDbDir = new File(activeDbDirName); - createDir(activeDbDir, activeDbDirName); + activeDbDir = new File(ACTIVE_DB_DIR_NAME); + createDir(activeDbDir, ACTIVE_DB_DIR_NAME); - metadataDirDir = new File(metadataDirName); - createDir(metadataDirDir, metadataDirName); + metadataDirDir = new File(METADATA_DIR_NAME); + createDir(metadataDirDir, METADATA_DIR_NAME); - compactionLogDir = new File(metadataDirName, compactionLogDirName); - createDir(compactionLogDir, metadataDirName + "/" + compactionLogDirName); + compactionLogDir = new File(METADATA_DIR_NAME, COMPACTION_LOG_DIR_NAME); + createDir(compactionLogDir, METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME); - sstBackUpDir = new File(metadataDirName, sstBackUpDirName); - createDir(sstBackUpDir, metadataDirName + "/" + sstBackUpDirName); + sstBackUpDir = new File(METADATA_DIR_NAME, SST_BACK_UP_DIR_NAME); + createDir(sstBackUpDir, METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME); config = mock(ConfigurationSource.class); @@ -173,10 +174,10 @@ public void init() throws RocksDBException { OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS)).thenReturn(0L); - rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(metadataDirName, - sstBackUpDirName, - compactionLogDirName, - activeDbDirName, + rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME, + SST_BACK_UP_DIR_NAME, + COMPACTION_LOG_DIR_NAME, + ACTIVE_DB_DIR_NAME, config); ColumnFamilyOptions cfOpts = new ColumnFamilyOptions() @@ -188,7 +189,7 @@ public void init() throws RocksDBException { .setCreateMissingColumnFamilies(true); rocksDBCheckpointDiffer.setRocksDBForCompactionTracking(dbOptions); - activeRocksDB = RocksDB.open(dbOptions, activeDbDirName, cfDescriptors, + activeRocksDB = RocksDB.open(dbOptions, ACTIVE_DB_DIR_NAME, cfDescriptors, cfHandles); keyTableCFHandle = cfHandles.get(1); directoryTableCFHandle = cfHandles.get(2); @@ -517,7 +518,7 @@ public void testGetSSTDiffListWithoutDB(String description, @Test void testDifferWithDB() throws Exception { writeKeysAndCheckpointing(); - readRocksDBInstance(activeDbDirName, activeRocksDB, null, + readRocksDBInstance(ACTIVE_DB_DIR_NAME, activeRocksDB, null, rocksDBCheckpointDiffer); if (LOG.isDebugEnabled()) { @@ -613,7 +614,7 @@ private void createCheckpoint(RocksDB rocksDB) throws RocksDBException { } cpDirList.add(dir); - createCheckPoint(activeDbDirName, cpPath, rocksDB); + createCheckPoint(ACTIVE_DB_DIR_NAME, cpPath, rocksDB); final UUID snapshotId = UUID.randomUUID(); List colHandle = new ArrayList<>(); colHandles.add(colHandle); @@ -1272,7 +1273,7 @@ public void testPruneOlderSnapshotsWithCompactionHistory( if (compactionLogs != null) { for (int i = 0; i < compactionLogs.size(); i++) { - String compactionFileName = metadataDirName + "/" + compactionLogDirName + String compactionFileName = METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME + "/0000" + i + COMPACTION_LOG_FILE_NAME_SUFFIX; File compactionFile = new File(compactionFileName); Files.write(compactionFile.toPath(), @@ -1349,10 +1350,10 @@ private void waitForLock(RocksDBCheckpointDiffer differ, }); // Confirm that the consumer doesn't finish with lock taken. assertThrows(TimeoutException.class, - () -> future.get(5000, TimeUnit.MILLISECONDS)); + () -> future.get(1000, TimeUnit.MILLISECONDS)); } // Confirm consumer finishes when unlocked. 
- assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + assertTrue(future.get(100, TimeUnit.MILLISECONDS)); } private static Stream sstFilePruningScenarios() { @@ -1490,8 +1491,8 @@ public void testSstFilePruning( Path compactionLogFilePath = null; if (compactionLog != null) { - String compactionLogFileName = metadataDirName + "/" + - compactionLogDirName + "/compaction_log" + + String compactionLogFileName = METADATA_DIR_NAME + "/" + + COMPACTION_LOG_DIR_NAME + "/compaction_log" + COMPACTION_LOG_FILE_NAME_SUFFIX; compactionLogFilePath = new File(compactionLogFileName).toPath(); createFileWithContext(compactionLogFileName, compactionLog); @@ -1511,7 +1512,7 @@ public void testSstFilePruning( Set actualFileSetAfterPruning; try (Stream pathStream = Files.list( - Paths.get(metadataDirName + "/" + sstBackUpDirName)) + Paths.get(METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME)) .filter(e -> e.toString().toLowerCase() .endsWith(SST_FILE_EXTENSION)) .sorted()) { @@ -1867,7 +1868,7 @@ public void testDagOnlyContainsDesiredCfh() createKeys(compactionLogTableCFHandle, "logName-", "logValue-", 100); // Make sures that some compaction happened. - assertFalse(rocksDBCheckpointDiffer.getCompactionNodeMap().isEmpty()); + assertThat(rocksDBCheckpointDiffer.getCompactionNodeMap()).isNotEmpty(); List compactionNodes = rocksDBCheckpointDiffer. getCompactionNodeMap().values().stream() @@ -1877,7 +1878,7 @@ public void testDagOnlyContainsDesiredCfh() // CompactionNodeMap should not contain any node other than 'keyTable', // 'directoryTable' and 'fileTable' column families nodes. - assertTrue(compactionNodes.isEmpty()); + assertThat(compactionNodes).isEmpty(); // Assert that only 'keyTable', 'directoryTable' and 'fileTable' // column families SST files are backed-up. @@ -1889,7 +1890,7 @@ public void testDagOnlyContainsDesiredCfh() fileReader.open(path.toAbsolutePath().toString()); String columnFamily = StringUtils.bytes2String( fileReader.getTableProperties().getColumnFamilyName()); - assertTrue(COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(columnFamily)); + assertThat(COLUMN_FAMILIES_TO_TRACK_IN_DAG).contains(columnFamily); } catch (RocksDBException rocksDBException) { fail("Failed to read file: " + path.toAbsolutePath()); } diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index b42262fbabd..bb2bdec1405 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false @@ -68,7 +67,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on io.dropwizard.metrics diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java similarity index 86% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java index 76439a78464..e1d0fdd35aa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.scm; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import java.util.List; @@ -26,6 +27,15 @@ */ public interface PipelineChoosePolicy { + /** + * Updates the policy 
with NodeManager. + * @return updated policy. + */ + default PipelineChoosePolicy init(final NodeManager nodeManager) { + // override if the policy requires nodeManager + return this; + } + /** * Given an initial list of pipelines, return one of the pipelines. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java index 27a97a0349d..cc6147c7a64 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java @@ -35,7 +35,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.ha.ConfUtils; import org.apache.hadoop.util.StringUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -204,7 +204,7 @@ public static boolean shouldRemovePeers(final ConfigurationSource conf) { ScmConfigKeys.OZONE_SCM_DATANODE_DISALLOW_SAME_PEERS_DEFAULT)); } - @NotNull + @Nonnull public static List> initContainerReportQueue( OzoneConfiguration configuration) { int threadPoolSize = configuration.getInt(getContainerReportConfPrefix() diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 1260ea6a006..5f42fb00e45 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -97,12 +97,9 @@ public BlockManagerImpl(final ConfigurationSource conf, // SCM block deleting transaction log and deleting service. 
deletedBlockLog = new DeletedBlockLogImpl(conf, + scm, scm.getContainerManager(), - scm.getScmHAManager().getRatisServer(), - scm.getScmMetadataStore().getDeletedBlocksTXTable(), scm.getScmHAManager().getDBTransactionBuffer(), - scm.getScmContext(), - scm.getSequenceIdGen(), metrics); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index ac64f6e973e..9d5377b9e3e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -46,9 +46,9 @@ import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.utils.db.Table; @@ -95,14 +95,10 @@ public class DeletedBlockLogImpl private static final int LIST_ALL_FAILED_TRANSACTIONS = -1; - @SuppressWarnings("parameternumber") public DeletedBlockLogImpl(ConfigurationSource conf, + StorageContainerManager scm, ContainerManager containerManager, - SCMRatisServer ratisServer, - Table deletedBlocksTXTable, DBTransactionBuffer dbTxBuffer, - SCMContext scmContext, - SequenceIdGenerator sequenceIdGen, ScmBlockDeletingServiceMetrics metrics) { maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT); @@ -112,17 +108,17 @@ public DeletedBlockLogImpl(ConfigurationSource conf, this.deletedBlockLogStateManager = DeletedBlockLogStateManagerImpl .newBuilder() .setConfiguration(conf) - .setDeletedBlocksTable(deletedBlocksTXTable) + .setDeletedBlocksTable(scm.getScmMetadataStore().getDeletedBlocksTXTable()) .setContainerManager(containerManager) - .setRatisServer(ratisServer) + .setRatisServer(scm.getScmHAManager().getRatisServer()) .setSCMDBTransactionBuffer(dbTxBuffer) .build(); - this.scmContext = scmContext; - this.sequenceIdGen = sequenceIdGen; + this.scmContext = scm.getScmContext(); + this.sequenceIdGen = scm.getSequenceIdGen(); this.metrics = metrics; this.transactionStatusManager = new SCMDeletedBlockTransactionStatusManager(deletedBlockLogStateManager, - containerManager, scmContext, metrics, scmCommandTimeoutMs); + containerManager, this.scmContext, metrics, scmCommandTimeoutMs); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java index b43e91e0592..d0306211350 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java @@ -201,7 +201,7 @@ protected void onSent(UUID dnId, long scmCmdId) { } protected void onDatanodeDead(UUID dnId) { - LOG.info("Clean SCMCommand record for DN: {}", dnId); + 
LOG.info("Clean SCMCommand record for Datanode: {}", dnId); scmCmdStatusRecord.remove(dnId); } @@ -252,12 +252,14 @@ private void updateStatus(UUID dnId, long scmCmdId, CommandStatus.Status newStatus) { Map recordForDn = scmCmdStatusRecord.get(dnId); if (recordForDn == null) { - LOG.warn("Unknown Datanode: {} scmCmdId {} newStatus {}", + LOG.warn("Unknown Datanode: {} Scm Command ID: {} report status {}", dnId, scmCmdId, newStatus); return; } if (recordForDn.get(scmCmdId) == null) { - LOG.warn("Unknown SCM Command: {} Datanode {} newStatus {}", + // Because of the delay in the DN report, the DN sometimes report obsolete + // Command status that are cleared by the SCM. + LOG.debug("Unknown SCM Command ID: {} Datanode: {} report status {}", scmCmdId, dnId, newStatus); return; } @@ -293,22 +295,23 @@ private void updateStatus(UUID dnId, long scmCmdId, // which should not normally occur. LOG.error("Received {} status for a command marked TO_BE_SENT. " + "This indicates a potential issue in command handling. " + - "SCM Command ID: {}, Datanode ID: {}, Current Status: {}", + "SCM Command ID: {}, Datanode: {}, Current status: {}", newStatus, scmCmdId, dnId, oldStatus); removeScmCommand(dnId, scmCmdId); changed = true; } break; default: - LOG.error("Can not update to Unknown new Status: {}", newStatus); + LOG.error("Unexpected status from Datanode: {}. SCM Command ID: {} with status: {}.", + dnId, scmCmdId, newStatus); break; } if (!changed) { - LOG.warn("Cannot update illegal status for DN: {} ScmCommandId {} " + - "Status From {} to {}", dnId, scmCmdId, oldStatus, newStatus); + LOG.warn("Cannot update illegal status for Datanode: {} SCM Command ID: {} " + + "status {} by DN report status {}", dnId, scmCmdId, oldStatus, newStatus); } else { - LOG.debug("Successful update DN: {} ScmCommandId {} Status From {} to" + - " {}", dnId, scmCmdId, oldStatus, newStatus); + LOG.debug("Successful update Datanode: {} SCM Command ID: {} status From {} to" + + " {}, DN report status {}", dnId, scmCmdId, oldStatus, statusData.getStatus(), newStatus); } } @@ -320,11 +323,8 @@ private void removeTimeoutScmCommand(UUID dnId, if (updateTime != null && Duration.between(updateTime, now).toMillis() > timeoutMs) { CmdStatusData state = removeScmCommand(dnId, scmCmdId); - LOG.warn("Remove Timeout SCM BlockDeletionCommand {} for DN {} " + - "after without update {}ms}", state, dnId, timeoutMs); - } else { - LOG.warn("Timeout SCM scmCmdIds {} for DN {} " + - "after without update {}ms}", scmCmdIds, dnId, timeoutMs); + LOG.warn("SCM BlockDeletionCommand {} for Datanode: {} was removed after {}ms without update", + state, dnId, timeoutMs); } } } @@ -335,7 +335,7 @@ private CmdStatusData removeScmCommand(UUID dnId, long scmCmdId) { return null; } CmdStatusData statusData = record.remove(scmCmdId); - LOG.debug("Remove ScmCommand {} for DN: {} ", statusData, dnId); + LOG.debug("Remove ScmCommand {} for Datanode: {} ", statusData, dnId); return statusData; } @@ -483,7 +483,7 @@ public void commitTransactions( // Mostly likely it's a retried delete command response. if (LOG.isDebugEnabled()) { LOG.debug( - "Transaction txId={} commit by dnId={} for containerID={}" + "Transaction txId: {} commit by Datanode: {} for ContainerId: {}" + " failed. 
Corresponding entry not found.", txID, dnId, containerId); } @@ -508,13 +508,13 @@ public void commitTransactions( transactionToDNsCommitMap.remove(txID); transactionToRetryCountMap.remove(txID); if (LOG.isDebugEnabled()) { - LOG.debug("Purging txId={} from block deletion log", txID); + LOG.debug("Purging txId: {} from block deletion log", txID); } txIDsToBeDeleted.add(txID); } } if (LOG.isDebugEnabled()) { - LOG.debug("Datanode txId={} containerId={} committed by dnId={}", + LOG.debug("Datanode txId: {} ContainerId: {} committed by Datanode: {}", txID, containerId, dnId); } } catch (IOException e) { @@ -557,7 +557,7 @@ private void processSCMCommandStatus(List deleteBlockStatus, lastStatus.put(cmdStatus.getCmdId(), cmdStatus); summary.put(cmdStatus.getCmdId(), cmdStatus.getStatus()); }); - LOG.debug("CommandStatus {} from Datanode {} ", summary, dnID); + LOG.debug("CommandStatus {} from Datanode: {} ", summary, dnID); for (Map.Entry entry : lastStatus.entrySet()) { CommandStatus.Status status = entry.getValue().getStatus(); scmDeleteBlocksCommandStatusManager.updateStatusByDNCommandStatus( @@ -568,11 +568,11 @@ private void processSCMCommandStatus(List deleteBlockStatus, private boolean isTransactionFailed(DeleteBlockTransactionResult result) { if (LOG.isDebugEnabled()) { LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", + "Got block deletion ACK from datanode, TXIDs {}, " + "success {}", result.getTxID(), result.getSuccess()); } if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " + LOG.warn("Got failed ACK for TXID {}, prepare to resend the " + "TX in next interval", result.getTxID()); return true; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java index e9d260f743a..2da19b4ef20 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java @@ -20,7 +20,7 @@ import java.util.List; import java.util.stream.Collectors; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java index 56da38ed577..78ebfd311dd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java @@ -39,33 +39,21 @@ public final class ContainerReplica implements Comparable { private final UUID placeOfBirth; private final int replicaIndex; - private Long sequenceId; + private final Long sequenceId; private final long keyCount; private final long bytesUsed; private final boolean isEmpty; - @SuppressWarnings("parameternumber") - private ContainerReplica( - final ContainerID containerID, - final ContainerReplicaProto.State state, - final int replicaIndex, - final DatanodeDetails datanode, - final UUID originNodeId, - long keyNum, - long dataSize, - boolean isEmpty) { - 
this.containerID = containerID; - this.state = state; - this.datanodeDetails = datanode; - this.placeOfBirth = originNodeId; - this.keyCount = keyNum; - this.bytesUsed = dataSize; - this.replicaIndex = replicaIndex; - this.isEmpty = isEmpty; - } - - private void setSequenceId(Long seqId) { - sequenceId = seqId; + private ContainerReplica(ContainerReplicaBuilder b) { + containerID = b.containerID; + state = b.state; + datanodeDetails = b.datanode; + placeOfBirth = Optional.ofNullable(b.placeOfBirth).orElse(datanodeDetails.getUuid()); + keyCount = b.keyCount; + bytesUsed = b.bytesUsed; + replicaIndex = b.replicaIndex; + isEmpty = b.isEmpty; + sequenceId = b.sequenceId; } /** @@ -299,12 +287,7 @@ public ContainerReplica build() { "Container state can't be null"); Preconditions.checkNotNull(datanode, "DatanodeDetails can't be null"); - ContainerReplica replica = new ContainerReplica( - containerID, state, replicaIndex, datanode, - Optional.ofNullable(placeOfBirth).orElse(datanode.getUuid()), - keyCount, bytesUsed, isEmpty); - Optional.ofNullable(sequenceId).ifPresent(replica::setSequenceId); - return replica; + return new ContainerReplica(this); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 72d90abe1f4..cf5975d05eb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -251,16 +251,12 @@ private void initialize() throws IOException { pipelineManager.addContainerToPipelineSCMStart( container.getPipelineID(), container.containerID()); } catch (PipelineNotFoundException ex) { + // We are ignoring this here. The container will be moved to + // CLOSING state by ReplicationManager's OpenContainerHandler + // For more info: HDDS-10231 LOG.warn("Found container {} which is in OPEN state with " + - "pipeline {} that does not exist. Marking container for " + - "closing.", container, container.getPipelineID()); - try { - updateContainerState(container.containerID().getProtobuf(), - LifeCycleEvent.FINALIZE); - } catch (InvalidStateTransitionException e) { - // This cannot happen. - LOG.warn("Unable to finalize Container {}.", container); - } + "pipeline {} that does not exist.", + container, container.getPipelineID()); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java index 660452b2d8b..5416a9ff1c3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java @@ -96,7 +96,7 @@ private void setConfiguration(ContainerBalancerConfiguration conf) { * Find a {@link ContainerMoveSelection} consisting of a target and * container to move for a source datanode. Favours more under-utilized nodes. 
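A rough usage sketch for the builder-backed ContainerReplica construction above. The newBuilder() factory and setter names are assumptions inferred from the ContainerReplicaBuilder fields shown, not verified against the full class.

// Illustrative only: setter names are assumed from the builder fields referenced
// above (containerID, state, datanode, sequenceId, ...).
ContainerReplica replica = ContainerReplica.newBuilder()
    .setContainerID(containerID)                            // required, checked in build()
    .setContainerState(ContainerReplicaProto.State.CLOSED)  // required, checked in build()
    .setDatanodeDetails(datanode)                            // required
    .build();
// placeOfBirth falls back to datanode.getUuid() because no origin node was set,
// and sequenceId stays null since the field is now final and never reassigned.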
* @param source Datanode to find a target for - * @param candidateContainers Set of candidate containers satisfying + * @param container candidate container satisfying * selection criteria * {@link ContainerBalancerSelectionCriteria} * (DatanodeDetails, Long) method returns true if the size specified in the @@ -105,29 +105,27 @@ private void setConfiguration(ContainerBalancerConfiguration conf) { */ @Override public ContainerMoveSelection findTargetForContainerMove( - DatanodeDetails source, Set candidateContainers) { + DatanodeDetails source, ContainerID container) { sortTargetForSource(source); for (DatanodeUsageInfo targetInfo : potentialTargets) { DatanodeDetails target = targetInfo.getDatanodeDetails(); - for (ContainerID container : candidateContainers) { - Set replicas; - ContainerInfo containerInfo; - try { - replicas = containerManager.getContainerReplicas(container); - containerInfo = containerManager.getContainer(container); - } catch (ContainerNotFoundException e) { - logger.warn("Could not get Container {} from Container Manager for " + - "obtaining replicas in Container Balancer.", container, e); - continue; - } + Set replicas; + ContainerInfo containerInfo; + try { + replicas = containerManager.getContainerReplicas(container); + containerInfo = containerManager.getContainer(container); + } catch (ContainerNotFoundException e) { + logger.warn("Could not get Container {} from Container Manager for " + + "obtaining replicas in Container Balancer.", container, e); + return null; + } - if (replicas.stream().noneMatch( - replica -> replica.getDatanodeDetails().equals(target)) && - containerMoveSatisfiesPlacementPolicy(container, replicas, source, - target) && - canSizeEnterTarget(target, containerInfo.getUsedBytes())) { - return new ContainerMoveSelection(target, container); - } + if (replicas.stream().noneMatch( + replica -> replica.getDatanodeDetails().equals(target)) && + containerMoveSatisfiesPlacementPolicy(container, replicas, source, + target) && + canSizeEnterTarget(target, containerInfo.getUsedBytes())) { + return new ContainerMoveSelection(target, container); } } logger.info("Container Balancer could not find a target for " + @@ -228,6 +226,9 @@ public void increaseSizeEntering(DatanodeDetails target, long size) { if (totalEnteringSize < config.getMaxSizeEnteringTarget()) { //reorder potentialTargets.add(nodeManager.getUsageInfo(target)); + } else { + logger.debug("Datanode {} removed from the list of potential targets. 
The total size of data entering it in " + + "this iteration is {}.", target, totalEnteringSize); } return; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java index 8f9332e2d3c..7e2ba2fd012 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.ozone.OzoneConsts; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -472,8 +472,8 @@ ContainerBalancerConfigurationProto.Builder toProtobufBuilder() { } static ContainerBalancerConfiguration fromProtobuf( - @NotNull ContainerBalancerConfigurationProto proto, - @NotNull OzoneConfiguration ozoneConfiguration) { + @Nonnull ContainerBalancerConfigurationProto proto, + @Nonnull OzoneConfiguration ozoneConfiguration) { ContainerBalancerConfiguration config = ozoneConfiguration.getObject(ContainerBalancerConfiguration.class); if (proto.hasUtilizationThreshold()) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java index 8171320a54f..d9102a88329 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java @@ -31,8 +31,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.NavigableSet; import java.util.Set; import java.util.TreeSet; @@ -52,6 +55,7 @@ public class ContainerBalancerSelectionCriteria { private Set selectedContainers; private Set excludeContainers; private FindSourceStrategy findSourceStrategy; + private Map> setMap; public ContainerBalancerSelectionCriteria( ContainerBalancerConfiguration balancerConfiguration, @@ -66,6 +70,7 @@ public ContainerBalancerSelectionCriteria( selectedContainers = new HashSet<>(); excludeContainers = balancerConfiguration.getExcludeContainers(); this.findSourceStrategy = findSourceStrategy; + this.setMap = new HashMap<>(); } /** @@ -79,38 +84,20 @@ private boolean isContainerReplicatingOrDeleting(ContainerID containerID) { } /** - * Gets containers that are suitable for moving based on the following - * required criteria: - * 1. Container must not be undergoing replication. - * 2. Container must not already be selected for balancing. - * 3. Container size should be closer to 5GB. - * 4. Container must not be in the configured exclude containers list. - * 5. Container should be closed. - * 6. If the {@link LegacyReplicationManager} is enabled, then the container should not be an EC container. - * @param node DatanodeDetails for which to find candidate containers. 
- * @return NavigableSet of candidate containers that satisfy the criteria. + * Get ContainerID Set for the Datanode, it will be returned as NavigableSet + * Since sorting will be time-consuming, the Set will be cached. + * + * @param node source datanode + * @return cached Navigable ContainerID Set */ - public NavigableSet getCandidateContainers( - DatanodeDetails node, long sizeMovedAlready) { - NavigableSet containerIDSet = - new TreeSet<>(orderContainersByUsedBytes().reversed()); - try { - containerIDSet.addAll(nodeManager.getContainers(node)); - } catch (NodeNotFoundException e) { - LOG.warn("Could not find Datanode {} while selecting candidate " + - "containers for Container Balancer.", node.toString(), e); - return containerIDSet; + public Set getContainerIDSet(DatanodeDetails node) { + // Check if the node is registered at the beginning + if (!nodeManager.isNodeRegistered(node)) { + return Collections.emptySet(); } - if (excludeContainers != null) { - containerIDSet.removeAll(excludeContainers); - } - if (selectedContainers != null) { - containerIDSet.removeAll(selectedContainers); - } - - containerIDSet.removeIf( - containerID -> shouldBeExcluded(containerID, node, sizeMovedAlready)); - return containerIDSet; + Set containers = setMap.computeIfAbsent(node, + this::getCandidateContainers); + return containers != null ? containers : Collections.emptySet(); } /** @@ -165,7 +152,19 @@ private boolean isECContainerAndLegacyRMEnabled(ContainerInfo container) { && replicationManager.getConfig().isLegacyEnabled(); } - private boolean shouldBeExcluded(ContainerID containerID, + /** + * Gets containers that are suitable for moving based on the following + * required criteria: + * 1. Container must not be undergoing replication. + * 2. Container must not already be selected for balancing. + * 3. Container size should be closer to 5GB. + * 4. Container must not be in the configured exclude containers list. + * 5. Container should be closed. + * 6. If the {@link LegacyReplicationManager} is enabled, then the container should not be an EC container. + * @param node DatanodeDetails for which to find candidate containers. + * @return true if the container should be excluded, else false + */ + public boolean shouldBeExcluded(ContainerID containerID, DatanodeDetails node, long sizeMovedAlready) { ContainerInfo container; try { @@ -175,7 +174,8 @@ private boolean shouldBeExcluded(ContainerID containerID, "candidate container. 
Excluding it.", containerID); return true; } - return !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) || + return excludeContainers.contains(containerID) || selectedContainers.contains(containerID) || + !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) || isContainerReplicatingOrDeleting(containerID) || !findSourceStrategy.canSizeLeaveSource(node, container.getUsedBytes()) || breaksMaxSizeToMoveLimit(container.containerID(), @@ -242,4 +242,24 @@ public void setSelectedContainers( this.selectedContainers = selectedContainers; } + + private NavigableSet getCandidateContainers(DatanodeDetails node) { + NavigableSet newSet = + new TreeSet<>(orderContainersByUsedBytes().reversed()); + try { + Set idSet = nodeManager.getContainers(node); + if (excludeContainers != null) { + idSet.removeAll(excludeContainers); + } + if (selectedContainers != null) { + idSet.removeAll(selectedContainers); + } + newSet.addAll(idSet); + return newSet; + } catch (NodeNotFoundException e) { + LOG.warn("Could not find Datanode {} while selecting candidate " + + "containers for Container Balancer.", node, e); + return null; + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index abbc50ac86a..94e8cfd04a1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -50,7 +50,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.NavigableSet; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -692,11 +691,10 @@ private long cancelMovesThatExceedTimeoutDuration() { * @return ContainerMoveSelection containing the selected target and container */ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) { - NavigableSet candidateContainers = - selectionCriteria.getCandidateContainers(source, - sizeScheduledForMoveInLatestIteration); + Set sourceContainerIDSet = + selectionCriteria.getContainerIDSet(source); - if (candidateContainers.isEmpty()) { + if (sourceContainerIDSet.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("ContainerBalancer could not find any candidate containers " + "for datanode {}", source.getUuidString()); @@ -708,9 +706,23 @@ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) { LOG.debug("ContainerBalancer is finding suitable target for source " + "datanode {}", source.getUuidString()); } - ContainerMoveSelection moveSelection = - findTargetStrategy.findTargetForContainerMove( - source, candidateContainers); + + ContainerMoveSelection moveSelection = null; + Set toRemoveContainerIds = new HashSet<>(); + for (ContainerID containerId: sourceContainerIDSet) { + if (selectionCriteria.shouldBeExcluded(containerId, source, + sizeScheduledForMoveInLatestIteration)) { + toRemoveContainerIds.add(containerId); + continue; + } + moveSelection = findTargetStrategy.findTargetForContainerMove(source, + containerId); + if (moveSelection != null) { + break; + } + } + // Update cached containerIDSet in setMap + sourceContainerIDSet.removeAll(toRemoveContainerIds); if (moveSelection == null) { if (LOG.isDebugEnabled()) { diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 4f5868f2456..6350c3c7619 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,7 +72,7 @@ private void setLowerLimit(Double lowerLimit) { * {@inheritDoc} */ public void resetPotentialSources( - @NotNull Collection sources) { + @Nonnull Collection sources) { List usageInfos = new ArrayList<>(sources.size()); sources.forEach(source -> usageInfos.add(nodeManager.getUsageInfo(source))); resetSources(usageInfos); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java index e2716304839..236bdfd98d4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.Collection; import java.util.List; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java index 87feae4981d..393b44d44bf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java @@ -25,7 +25,8 @@ import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; @@ -41,6 +42,8 @@ */ public class FindTargetGreedyByNetworkTopology extends AbstractFindTargetGreedy { + public static final Logger LOG = + LoggerFactory.getLogger(FindTargetGreedyByNetworkTopology.class); private NetworkTopology networkTopology; private List potentialTargets; @@ -51,7 +54,7 @@ public FindTargetGreedyByNetworkTopology( NodeManager nodeManager, NetworkTopology networkTopology) { super(containerManager, placementPolicyValidateProxy, nodeManager); - setLogger(LoggerFactory.getLogger(FindTargetGreedyByNetworkTopology.class)); + setLogger(LOG); potentialTargets = new LinkedList<>(); setPotentialTargets(potentialTargets); this.networkTopology = networkTopology; @@ -87,7 +90,7 @@ public void sortTargetForSource(DatanodeDetails source) { */ @Override 
public void resetPotentialTargets( - @NotNull Collection targets) { + @Nonnull Collection targets) { // create DatanodeUsageInfo from DatanodeDetails List usageInfos = new ArrayList<>(targets.size()); targets.forEach(datanodeDetails -> usageInfos.add( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java index 71a338db922..6e0c923b926 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java @@ -24,7 +24,8 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; @@ -37,12 +38,15 @@ * target with the lowest space usage. */ public class FindTargetGreedyByUsageInfo extends AbstractFindTargetGreedy { + public static final Logger LOG = + LoggerFactory.getLogger(FindTargetGreedyByUsageInfo.class); + public FindTargetGreedyByUsageInfo( ContainerManager containerManager, PlacementPolicyValidateProxy placementPolicyValidateProxy, NodeManager nodeManager) { super(containerManager, placementPolicyValidateProxy, nodeManager); - setLogger(LoggerFactory.getLogger(FindTargetGreedyByUsageInfo.class)); + setLogger(LOG); setPotentialTargets(new TreeSet<>((a, b) -> compareByUsage(a, b))); } @@ -63,7 +67,7 @@ public void sortTargetForSource(DatanodeDetails source) { */ @Override public void resetPotentialTargets( - @NotNull Collection targets) { + @Nonnull Collection targets) { // create DatanodeUsageInfo from DatanodeDetails List usageInfos = new ArrayList<>(targets.size()); targets.forEach(datanodeDetails -> usageInfos.add( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java index 17f6aa329dc..a9f2ee00a2d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java @@ -22,10 +22,9 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.Collection; import java.util.List; -import java.util.Set; /** * This interface can be used to implement strategies to find a target for a @@ -40,7 +39,7 @@ public interface FindTargetStrategy { * enter a potential target. 
* * @param source Datanode to find a target for - * @param candidateContainers Set of candidate containers satisfying + * @param candidateContainer candidate containers satisfying * selection criteria * {@link ContainerBalancerSelectionCriteria} * (DatanodeDetails, Long) method returns true if the size specified in the @@ -49,7 +48,7 @@ public interface FindTargetStrategy { * selected container */ ContainerMoveSelection findTargetForContainerMove( - DatanodeDetails source, Set candidateContainers); + DatanodeDetails source, ContainerID candidateContainer); /** * increase the Entering size of a candidate target data node. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 330bf67416a..094e535dcbd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -23,7 +23,8 @@ /** * SCM Node Metric that is used in the placement classes. */ -public class SCMNodeMetric implements DatanodeMetric { +public class SCMNodeMetric implements DatanodeMetric, + Comparable { private SCMNodeStat stat; /** @@ -195,12 +196,12 @@ public void subtract(SCMNodeStat value) { * @throws ClassCastException if the specified object's type prevents it * from being compared to this object. */ - //@Override - public int compareTo(SCMNodeStat o) { - if (isEqual(o)) { + @Override + public int compareTo(SCMNodeMetric o) { + if (isEqual(o.get())) { return 0; } - if (isGreater(o)) { + if (isGreater(o.get())) { return 1; } else { return -1; @@ -225,4 +226,9 @@ public boolean equals(Object o) { public int hashCode() { return stat != null ? 
stat.hashCode() : 0; } + + @Override + public String toString() { + return "SCMNodeMetric{" + stat.toString() + '}'; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 2a848a04eff..5456e6ee527 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -174,4 +174,13 @@ public int hashCode() { return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ committed.get() ^ freeSpaceToSpare.get()); } + + @Override + public String toString() { + return "SCMNodeStat{" + + "capacity=" + capacity.get() + + ", scmUsed=" + scmUsed.get() + + ", remaining=" + remaining.get() + + '}'; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index 979cff799fa..a3661243be6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; @@ -1545,5 +1546,14 @@ private int getRemainingMaintenanceRedundancy(boolean isEC) { private static boolean isEC(ReplicationConfig replicationConfig) { return replicationConfig.getReplicationType() == EC; } + + public boolean hasHealthyPipeline(ContainerInfo container) { + try { + return scmContext.getScm().getPipelineManager() + .getPipeline(container.getPipelineID()) != null; + } catch (PipelineNotFoundException e) { + return false; + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java index 2c0b405db97..21c3c76d3e9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java @@ -53,20 +53,26 @@ public boolean handle(ContainerCheckRequest request) { if (containerInfo.getState() == HddsProtos.LifeCycleState.OPEN) { LOG.debug("Checking open container {} in OpenContainerHandler", containerInfo); - if (!isOpenContainerHealthy( - containerInfo, request.getContainerReplicas())) { - // This is an unhealthy open container, so we need to trigger the - // close process on it. - LOG.debug("Container {} is open but unhealthy. 
Triggering close.", - containerInfo); - request.getReport().incrementAndSample( - ReplicationManagerReport.HealthState.OPEN_UNHEALTHY, + final boolean noPipeline = !replicationManager.hasHealthyPipeline(containerInfo); + // Minor optimization. If noPipeline is true, isOpenContainerHealthy will not + // be called. + final boolean unhealthy = noPipeline || !isOpenContainerHealthy(containerInfo, + request.getContainerReplicas()); + if (unhealthy) { + // For an OPEN container, we close the container + // if the container has no Pipeline or if the container is unhealthy. + LOG.info("Container {} is open but {}. Triggering close.", + containerInfo, noPipeline ? "has no Pipeline" : "unhealthy"); + + request.getReport().incrementAndSample(noPipeline ? + ReplicationManagerReport.HealthState.OPEN_WITHOUT_PIPELINE : + ReplicationManagerReport.HealthState.OPEN_UNHEALTHY, containerInfo.containerID()); + if (!request.isReadOnly()) { replicationManager .sendCloseContainerEvent(containerInfo.containerID()); } - return true; } // For open containers we do not want to do any further processing in RM // so return true to stop the command chain. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java index 03e2a15938e..f28fcc7423b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java @@ -45,17 +45,14 @@ public final class BackgroundSCMService implements SCMService { private final Runnable periodicalTask; private volatile boolean runImmediately = false; - private BackgroundSCMService( - final Clock clock, final SCMContext scmContext, - final String serviceName, final long intervalInMillis, - final long waitTimeInMillis, final Runnable task) { - this.scmContext = scmContext; - this.clock = clock; - this.periodicalTask = task; - this.serviceName = serviceName; - this.log = LoggerFactory.getLogger(serviceName); - this.intervalInMillis = intervalInMillis; - this.waitTimeInMillis = waitTimeInMillis; + private BackgroundSCMService(Builder b) { + scmContext = b.scmContext; + clock = b.clock; + periodicalTask = b.periodicalTask; + serviceName = b.serviceName; + log = LoggerFactory.getLogger(serviceName); + intervalInMillis = b.intervalInMillis; + waitTimeInMillis = b.waitTimeInMillis; start(); } @@ -206,8 +203,7 @@ public BackgroundSCMService build() { Preconditions.assertNotNull(clock, "clock is null"); Preconditions.assertNotNull(serviceName, "serviceName is null"); - return new BackgroundSCMService(clock, scmContext, serviceName, - intervalInMillis, waitTimeInMillis, periodicalTask); + return new BackgroundSCMService(this); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java index 08ee20f5af7..b5f926638d4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java @@ -27,7 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Optional; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -78,17 +77,13 @@ public static SCMContext emptyContext() { */ private volatile 
FinalizationCheckpoint finalizationCheckpoint; - private SCMContext(boolean isLeader, long term, - final SafeModeStatus safeModeStatus, - final FinalizationCheckpoint finalizationCheckpoint, - final OzoneStorageContainerManager scm, String threadNamePrefix) { - this.isLeader = isLeader; - this.term = term; - this.safeModeStatus = safeModeStatus; - this.finalizationCheckpoint = finalizationCheckpoint; - this.scm = scm; - this.isLeaderReady = false; - this.threadNamePrefix = threadNamePrefix; + private SCMContext(Builder b) { + isLeader = b.isLeader; + term = b.term; + safeModeStatus = new SafeModeStatus(b.isInSafeMode, b.isPreCheckComplete); + finalizationCheckpoint = b.finalizationCheckpoint; + scm = b.scm; + threadNamePrefix = b.threadNamePrefix; } /** @@ -104,9 +99,9 @@ public void updateLeaderAndTerm(boolean leader, long newTerm) { isLeader = leader; // If it is not leader, set isLeaderReady to false. if (!isLeader) { - isLeaderReady = false; LOG.info("update from <{}> to <{}>", isLeaderReady, false); + isLeaderReady = false; } term = newTerm; } finally { @@ -285,7 +280,7 @@ public static class Builder { private boolean isInSafeMode = false; private boolean isPreCheckComplete = true; private OzoneStorageContainerManager scm = null; - private FinalizationCheckpoint finalizationCheckpoint; + private FinalizationCheckpoint finalizationCheckpoint = FinalizationCheckpoint.FINALIZATION_COMPLETE; private String threadNamePrefix = ""; public Builder setLeader(boolean leader) { @@ -335,13 +330,7 @@ public SCMContext build() { */ @VisibleForTesting SCMContext buildMaybeInvalid() { - return new SCMContext( - isLeader, - term, - new SafeModeStatus(isInSafeMode, isPreCheckComplete), - Optional.ofNullable(finalizationCheckpoint).orElse( - FinalizationCheckpoint.FINALIZATION_COMPLETE), - scm, threadNamePrefix); + return new SCMContext(this); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java index 1194a5260ce..2f2e1c4c1ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.OutputStream; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java index 17901ecfde5..bd65d384844 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java @@ -19,8 +19,6 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.hdds.NodeDetails; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftPeerId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,38 +28,26 @@ * Construct SCM node details. 
*/ public final class SCMNodeDetails extends NodeDetails { - private InetSocketAddress blockProtocolServerAddress; - private String blockProtocolServerAddressKey; - private InetSocketAddress clientProtocolServerAddress; - private String clientProtocolServerAddressKey; - private InetSocketAddress datanodeProtocolServerAddress; - private String datanodeAddressKey; - private int grpcPort; + private final InetSocketAddress blockProtocolServerAddress; + private final String blockProtocolServerAddressKey; + private final InetSocketAddress clientProtocolServerAddress; + private final String clientProtocolServerAddressKey; + private final InetSocketAddress datanodeProtocolServerAddress; + private final String datanodeAddressKey; + private final int grpcPort; + public static final Logger LOG = LoggerFactory.getLogger(SCMNodeDetails.class); - /** - * Constructs SCMNodeDetails object. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private SCMNodeDetails(String serviceId, String nodeId, - InetSocketAddress rpcAddr, int ratisPort, int grpcPort, - String httpAddress, String httpsAddress, - InetSocketAddress blockProtocolServerAddress, - InetSocketAddress clientProtocolServerAddress, - InetSocketAddress datanodeProtocolServerAddress, RaftGroup group, - RaftPeerId selfPeerId, String datanodeAddressKey, - String blockProtocolServerAddressKey, - String clientProtocolServerAddressAddressKey) { - super(serviceId, nodeId, rpcAddr, ratisPort, - httpAddress, httpsAddress); - this.grpcPort = grpcPort; - this.blockProtocolServerAddress = blockProtocolServerAddress; - this.clientProtocolServerAddress = clientProtocolServerAddress; - this.datanodeProtocolServerAddress = datanodeProtocolServerAddress; - this.datanodeAddressKey = datanodeAddressKey; - this.blockProtocolServerAddressKey = blockProtocolServerAddressKey; - this.clientProtocolServerAddressKey = clientProtocolServerAddressAddressKey; + private SCMNodeDetails(Builder b) { + super(b.scmServiceId, b.scmNodeId, b.rpcAddress, b.ratisPort, b.httpAddr, b.httpsAddr); + grpcPort = b.grpcPort; + blockProtocolServerAddress = b.blockProtocolServerAddress; + clientProtocolServerAddress = b.clientProtocolServerAddress; + datanodeProtocolServerAddress = b.datanodeProtocolServerAddress; + datanodeAddressKey = b.datanodeAddressKey; + blockProtocolServerAddressKey = b.blockProtocolServerAddressKey; + clientProtocolServerAddressKey = b.clientProtocolServerAddressKey; } @Override @@ -96,8 +82,6 @@ public static class Builder { private String clientProtocolServerAddressKey; private InetSocketAddress datanodeProtocolServerAddress; private String datanodeAddressKey; - private RaftGroup raftGroup; - private RaftPeerId selfPeerId; public Builder setDatanodeAddressKey(String addressKey) { this.datanodeAddressKey = addressKey; @@ -129,16 +113,6 @@ public Builder setDatanodeProtocolServerAddress(InetSocketAddress address) { return this; } - public Builder setRaftGroup(RaftGroup group) { - this.raftGroup = group; - return this; - } - - public Builder setSelfPeerId(RaftPeerId peerId) { - this.selfPeerId = peerId; - return this; - } - public Builder setRpcAddress(InetSocketAddress rpcAddr) { this.rpcAddress = rpcAddr; return this; @@ -175,11 +149,7 @@ public Builder setHttpsAddress(String httpsAddress) { } public SCMNodeDetails build() { - return new SCMNodeDetails(scmServiceId, scmNodeId, rpcAddress, - ratisPort, grpcPort, httpAddr, httpsAddr, blockProtocolServerAddress, - clientProtocolServerAddress, datanodeProtocolServerAddress, - raftGroup, selfPeerId, datanodeAddressKey, 
- blockProtocolServerAddressKey, clientProtocolServerAddressKey); + return new SCMNodeDetails(this); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java index c7ab2cb2e8c..3e7db16c2a0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.utils.io.LengthOutputStream; import org.apache.ratis.util.function.CheckedFunction; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; /** * Codec to serialize/deserialize {@link X509Certificate}. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java index e0b4c3ce543..fbfbb49c252 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java @@ -18,7 +18,11 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -31,4 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable { void stopMonitoring(DatanodeDetails dn); Set getTrackedNodes(); void setMetrics(NodeDecommissionMetrics metrics); + Map> getContainersReplicatedOnNode(DatanodeDetails dn) + throws NodeNotFoundException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index 51c6d12dea9..d7975ff1e58 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -96,8 +96,8 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor { public static final class TrackedNode { private DatanodeDetails datanodeDetails; - private long startTime = 0L; + private Map> containersReplicatedOnNode = new ConcurrentHashMap<>(); public TrackedNode(DatanodeDetails datanodeDetails, long startTime) { this.datanodeDetails = datanodeDetails; @@ -122,6 +122,15 @@ public DatanodeDetails getDatanodeDetails() { public long getStartTime() { return startTime; } + + public Map> getContainersReplicatedOnNode() { + return containersReplicatedOnNode; + } + + public void setContainersReplicatedOnNode(List underReplicated, List unClosed) { + this.containersReplicatedOnNode.put("UnderReplicated", Collections.unmodifiableList(underReplicated)); + this.containersReplicatedOnNode.put("UnClosed", Collections.unmodifiableList(unClosed)); + } } private Map containerStateByHost; @@ -423,9 +432,7 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) boolean isHealthy = replicaSet.isHealthyEnoughForOffline(); if (!isHealthy) { - if (LOG.isDebugEnabled()) { - unClosedIDs.add(cid); - } + unClosedIDs.add(cid); if (unclosed < containerDetailsLoggingLimit || 
LOG.isDebugEnabled()) { LOG.info("Unclosed Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas())); @@ -448,20 +455,18 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) replicationManager.checkContainerStatus(replicaSet.getContainer(), report); replicatedOK = report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED) == 0; } - if (replicatedOK) { sufficientlyReplicated++; } else { - if (LOG.isDebugEnabled()) { - underReplicatedIDs.add(cid); - } + underReplicatedIDs.add(cid); if (underReplicated < containerDetailsLoggingLimit || LOG.isDebugEnabled()) { LOG.info("Under Replicated Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas())); } underReplicated++; } } catch (ContainerNotFoundException e) { - LOG.warn("ContainerID {} present in node list for {} but not found in containerManager", cid, dn); + LOG.warn("ContainerID {} present in node list for {} but not found in containerManager", cid, + dn.getDatanodeDetails()); } } LOG.info("{} has {} sufficientlyReplicated, {} deleting, {} " + @@ -485,9 +490,21 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) unclosed, unClosedIDs.stream().map( Object::toString).collect(Collectors.joining(", "))); } + dn.setContainersReplicatedOnNode(underReplicatedIDs, unClosedIDs); return underReplicated == 0 && unclosed == 0; } + public Map> getContainersReplicatedOnNode(DatanodeDetails dn) { + Iterator iterator = trackedNodes.iterator(); + while (iterator.hasNext()) { + TrackedNode trackedNode = iterator.next(); + if (trackedNode.equals(new TrackedNode(dn, 0L))) { + return trackedNode.getContainersReplicatedOnNode(); + } + } + return new HashMap<>(); + } + private String replicaDetails(Collection replicas) { StringBuilder sb = new StringBuilder(); sb.append("Replicas{"); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java index 3c40437d7f6..19ed24fbcaf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java @@ -42,7 +42,7 @@ import com.google.common.base.Preconditions; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index c98cc63c466..38e59b89e76 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -40,6 +41,7 @@ import java.util.Comparator; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.concurrent.Executors; import 
java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -292,6 +294,11 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, TimeUnit.SECONDS); } + public Map> getContainersReplicatedOnNode(DatanodeDetails dn) + throws NodeNotFoundException { + return getMonitor().getContainersReplicatedOnNode(dn); + } + @VisibleForTesting public DatanodeAdminMonitor getMonitor() { return monitor; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 399a7ef952e..21bcd1f78a2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.io.Closeable; import java.util.List; import java.util.Map; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index e9b7d220e1f..a149998db8b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -69,7 +69,9 @@ import javax.management.ObjectName; import java.io.IOException; +import java.math.RoundingMode; import java.net.InetAddress; +import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -138,9 +140,11 @@ public class SCMNodeManager implements NodeManager { * consistent view of the node state. */ private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final String opeState = "OPSTATE"; - private final String comState = "COMSTATE"; - private final String lastHeartbeat = "LASTHEARTBEAT"; + private static final String OPESTATE = "OPSTATE"; + private static final String COMSTATE = "COMSTATE"; + private static final String LASTHEARTBEAT = "LASTHEARTBEAT"; + private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT"; + private static final String TOTALCAPACITY = "CAPACITY"; /** * Constructs SCM machine Manager. */ @@ -268,7 +272,7 @@ public int getNodeCount(NodeStatus nodeStatus) { * Returns the Number of Datanodes by State they are in. Passing null for * either of the states acts like a wildcard for that state. * - * @parem nodeOpState - The Operational State of the node + * @param nodeOpState - The Operational State of the node * @param health - The health of the node * @return count */ @@ -507,19 +511,15 @@ private boolean updateDnsToUuidMap( * Send heartbeat to indicate the datanode is alive and doing well. * * @param datanodeDetails - DatanodeDetailsProto. - * @param layoutInfo - Layout Version Proto. * @return SCMheartbeat response. 
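A short sketch of how a caller might consume the per-node container breakdown newly exposed through NodeDecommissionManager.getContainersReplicatedOnNode above. Variable names are illustrative; the map keys follow TrackedNode.setContainersReplicatedOnNode.

// The monitor fills the map with "UnderReplicated" and "UnClosed" keys; an empty
// map is returned when the datanode is not currently tracked.
Map<String, List<ContainerID>> byState =
    decommissionManager.getContainersReplicatedOnNode(datanode); // may throw NodeNotFoundException
List<ContainerID> underReplicated =
    byState.getOrDefault("UnderReplicated", Collections.emptyList());
List<ContainerID> unClosed =
    byState.getOrDefault("UnClosed", Collections.emptyList());
// Both lists are unmodifiable snapshots taken during the last monitor run.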
*/ @Override public List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutInfo, - CommandQueueReportProto queueReport) { + CommandQueueReportProto queueReport) { Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " + "DatanodeDetails."); try { nodeStateManager.updateLastHeartbeatTime(datanodeDetails); - nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails, - layoutInfo); metrics.incNumHBProcessed(); updateDatanodeOpState(datanodeDetails); } catch (NodeNotFoundException e) { @@ -682,6 +682,15 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails, layoutVersionReport.toString().replaceAll("\n", "\\\\n")); } + try { + nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails, + layoutVersionReport); + } catch (NodeNotFoundException e) { + LOG.error("SCM trying to process Layout Version from an " + + "unregistered node {}.", datanodeDetails); + return; + } + // Software layout version is hardcoded to the SCM. int scmSlv = scmLayoutVersionManager.getSoftwareLayoutVersion(); int dnSlv = layoutVersionReport.getSoftwareLayoutVersion(); @@ -1103,9 +1112,9 @@ public Map> getNodeStatusInfo() { heartbeatTimeDiff = getLastHeartbeatTimeDiff(dni.getLastHeartbeatTime()); } Map map = new HashMap<>(); - map.put(opeState, opstate); - map.put(comState, healthState); - map.put(lastHeartbeat, heartbeatTimeDiff); + map.put(OPESTATE, opstate); + map.put(COMSTATE, healthState); + map.put(LASTHEARTBEAT, heartbeatTimeDiff); if (httpPort != null) { map.put(httpPort.getName().toString(), httpPort.getValue().toString()); } @@ -1113,11 +1122,97 @@ public Map> getNodeStatusInfo() { map.put(httpsPort.getName().toString(), httpsPort.getValue().toString()); } + String capacity = calculateStorageCapacity(dni.getStorageReports()); + map.put(TOTALCAPACITY, capacity); + String[] storagePercentage = calculateStoragePercentage( + dni.getStorageReports()); + String scmUsedPerc = storagePercentage[0]; + String nonScmUsedPerc = storagePercentage[1]; + map.put(USEDSPACEPERCENT, + "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%"); nodes.put(hostName, map); } return nodes; } + /** + * Calculate the storage capacity of the DataNode node. + * @param storageReports Calculate the storage capacity corresponding + * to the storage collection. + * @return + */ + public static String calculateStorageCapacity( + List storageReports) { + long capacityByte = 0; + if (storageReports != null && !storageReports.isEmpty()) { + for (StorageReportProto storageReport : storageReports) { + capacityByte += storageReport.getCapacity(); + } + } + + double ua = capacityByte; + StringBuilder unit = new StringBuilder("B"); + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 1, "KB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "MB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "GB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "TB"); + } + + DecimalFormat decimalFormat = new DecimalFormat("#0.0"); + decimalFormat.setRoundingMode(RoundingMode.HALF_UP); + String capacity = decimalFormat.format(ua); + return capacity + unit.toString(); + } + + /** + * Calculate the storage usage percentage of a DataNode node. + * @param storageReports Calculate the storage percentage corresponding + * to the storage collection. 
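
To make the storage formatting helpers this patch adds to SCMNodeManager (calculateStorageCapacity above and calculateStoragePercentage just below) easier to follow, here is a small worked example. The numbers, the wrapper class, and the StorageReportProto setter names are illustrative assumptions, not part of this patch: with 10 TB of raw capacity, 2 TB used by Ozone and 5 TB remaining, the expected rendering is "10.0TB" and "Ozone: 20.00%, other: 30.00%".

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;

public class StorageFormatExample {
  public static void main(String[] args) {
    long tb = 1024L * 1024 * 1024 * 1024;
    StorageReportProto report = StorageReportProto.newBuilder()
        .setStorageUuid("disk-1")            // assumed required proto field
        .setStorageLocation("/data/disk-1")  // assumed required proto field
        .setCapacity(10 * tb)                // 10 TB raw capacity
        .setScmUsed(2 * tb)                  // 2 TB used by Ozone
        .setRemaining(5 * tb)                // 5 TB free
        .build();
    List<StorageReportProto> reports = Collections.singletonList(report);

    // Prints "10.0TB": bytes are divided by 1024 until the value drops below 1024.
    System.out.println(SCMNodeManager.calculateStorageCapacity(reports));

    // Prints "Ozone: 20.00%, other: 30.00%": Ozone usage is scmUsed/capacity,
    // "other" is (capacity - scmUsed - remaining)/capacity, both rounded HALF_UP.
    String[] pct = SCMNodeManager.calculateStoragePercentage(reports);
    System.out.println("Ozone: " + pct[0] + "%, other: " + pct[1] + "%");
  }
}
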
+ * @return + */ + public static String[] calculateStoragePercentage( + List storageReports) { + String[] storagePercentage = new String[2]; + String usedPercentage = "N/A"; + String nonUsedPercentage = "N/A"; + if (storageReports != null && !storageReports.isEmpty()) { + long capacity = 0; + long scmUsed = 0; + long remaining = 0; + for (StorageReportProto storageReport : storageReports) { + capacity += storageReport.getCapacity(); + scmUsed += storageReport.getScmUsed(); + remaining += storageReport.getRemaining(); + } + long scmNonUsed = capacity - scmUsed - remaining; + + DecimalFormat decimalFormat = new DecimalFormat("#0.00"); + decimalFormat.setRoundingMode(RoundingMode.HALF_UP); + + double usedPerc = ((double) scmUsed / capacity) * 100; + usedPerc = usedPerc > 100.0 ? 100.0 : usedPerc; + double nonUsedPerc = ((double) scmNonUsed / capacity) * 100; + nonUsedPerc = nonUsedPerc > 100.0 ? 100.0 : nonUsedPerc; + usedPercentage = decimalFormat.format(usedPerc); + nonUsedPercentage = decimalFormat.format(nonUsedPerc); + } + + storagePercentage[0] = usedPercentage; + storagePercentage[1] = nonUsedPercentage; + return storagePercentage; + } + /** * Based on the current time and the last heartbeat, calculate the time difference * and get a string of the relative value. E.g. "2s ago", "1m 2s ago", etc. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java index f9fc651f2fa..99a58f690c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import java.io.IOException; import java.util.List; import java.util.stream.Collectors; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java new file mode 100644 index 00000000000..a95a473de6d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms; + +import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; +import org.apache.hadoop.hdds.scm.PipelineRequestInformation; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Deque; +import java.util.List; +import java.util.Objects; + +/** + * Pipeline choose policy that randomly choose pipeline with relatively + * lower utilization. + *
+ * The algorithm is as follows: pick two random pipelines from a given pool of + * pipelines, then choose the one with lower utilization. This gives + * pipelines with lower utilization a higher probability of being picked. + *
+ * The reasoning behind sampling two pipelines at random and keeping the one + * with lower utilization is covered by the papers linked from HDFS-11564. + * The same algorithm is also used by SCMContainerPlacementCapacity. + *
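
The javadoc above describes the classic "two random choices" selection trick. As a standalone illustration (the generic type and names below are mine, not the Ozone implementation that follows), the core idea fits in a few lines:

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.ToLongFunction;

/** Minimal sketch of two-random-choices selection; illustrative only. */
public final class TwoChoicesExample {
  private TwoChoicesExample() { }

  /** Sample two candidates at random and keep the one with lower utilization. */
  public static <T> T pick(List<T> candidates, ToLongFunction<T> utilization) {
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    T first = candidates.get(rnd.nextInt(candidates.size()));   // assumes a non-empty list
    T second = candidates.get(rnd.nextInt(candidates.size()));
    return utilization.applyAsLong(first) <= utilization.applyAsLong(second)
        ? first : second;
  }
}

Applied to pipelines, the "utilization" being compared is the scmUsed of the pipeline's datanodes, which is what the CapacityPipelineComparator in the class below walks through node by node.
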
+ */ +public class CapacityPipelineChoosePolicy implements PipelineChoosePolicy { + + private static final Logger LOG = + LoggerFactory.getLogger(PipelineChoosePolicy.class); + + private NodeManager nodeManager; + + private final PipelineChoosePolicy healthPolicy; + + public CapacityPipelineChoosePolicy() { + healthPolicy = new HealthyPipelineChoosePolicy(); + } + + @Override + public PipelineChoosePolicy init(final NodeManager scmNodeManager) { + this.nodeManager = scmNodeManager; + return this; + } + + @Override + public Pipeline choosePipeline(List pipelineList, + PipelineRequestInformation pri) { + Pipeline pipeline1 = healthPolicy.choosePipeline(pipelineList, pri); + Pipeline pipeline2 = healthPolicy.choosePipeline(pipelineList, pri); + + int result = new CapacityPipelineComparator(this) + .compare(pipeline1, pipeline2); + + LOG.debug("Chosen the {} pipeline", result <= 0 ? "first" : "second"); + return result <= 0 ? pipeline1 : pipeline2; + } + + @Override + public int choosePipelineIndex(List pipelineList, + PipelineRequestInformation pri) { + List mutableList = new ArrayList<>(pipelineList); + Pipeline pipeline = choosePipeline(mutableList, pri); + return pipelineList.indexOf(pipeline); + } + + /** + * Return a list of SCMNodeMetrics corresponding to the DataNodes in the + * pipeline, sorted in descending order based on scm used storage. + * @param pipeline pipeline + * @return sorted SCMNodeMetrics corresponding the pipeline + */ + private Deque getSortedNodeFromPipeline(Pipeline pipeline) { + Deque sortedNodeStack = new ArrayDeque<>(); + pipeline.getNodes().stream() + .map(nodeManager::getNodeStat) + .filter(Objects::nonNull) + .sorted() + .forEach(sortedNodeStack::push); + return sortedNodeStack; + } + + static class CapacityPipelineComparator implements Comparator { + private final CapacityPipelineChoosePolicy policy; + + CapacityPipelineComparator(CapacityPipelineChoosePolicy policy) { + this.policy = policy; + } + @Override + public int compare(Pipeline p1, Pipeline p2) { + if (p1.getId().equals(p2.getId())) { + LOG.debug("Compare the same pipeline {}", p1); + return 0; + } + Deque sortedNodes1 = policy.getSortedNodeFromPipeline(p1); + Deque sortedNodes2 = policy.getSortedNodeFromPipeline(p2); + + // Compare the scmUsed weight of the node in the two sorted node stacks + LOG.debug("Compare scmUsed weight in pipelines, first : {}, second : {}", + sortedNodes1, sortedNodes2); + int result = 0; + int count = 0; + while (result == 0 && + !sortedNodes1.isEmpty() && !sortedNodes2.isEmpty()) { + count++; + LOG.debug("Compare {} round", count); + result = sortedNodes1.pop().compareTo(sortedNodes2.pop()); + } + return result; + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java index d040dbe2bca..90736a01813 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,14 
+49,14 @@ public final class PipelineChoosePolicyFactory { private PipelineChoosePolicyFactory() { } - public static PipelineChoosePolicy getPolicy( + public static PipelineChoosePolicy getPolicy(final NodeManager nodeManager, ScmConfig scmConfig, boolean forEC) throws SCMException { Class policyClass = null; String policyName = forEC ? scmConfig.getECPipelineChoosePolicyName() : scmConfig.getPipelineChoosePolicyName(); try { policyClass = getClass(policyName, PipelineChoosePolicy.class); - return createPipelineChoosePolicyFromClass(policyClass); + return createPipelineChoosePolicyFromClass(nodeManager, policyClass); } catch (Exception e) { Class defaultPolicy = forEC ? OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT : @@ -64,13 +65,14 @@ public static PipelineChoosePolicy getPolicy( LOG.error("Met an exception while create pipeline choose policy " + "for the given class {}. Fallback to the default pipeline " + " choose policy {}", policyName, defaultPolicy, e); - return createPipelineChoosePolicyFromClass(defaultPolicy); + return createPipelineChoosePolicyFromClass(nodeManager, defaultPolicy); } throw e; } } private static PipelineChoosePolicy createPipelineChoosePolicyFromClass( + final NodeManager nodeManager, Class policyClass) throws SCMException { Constructor constructor; try { @@ -86,7 +88,7 @@ private static PipelineChoosePolicy createPipelineChoosePolicyFromClass( } try { - return constructor.newInstance(); + return constructor.newInstance().init(nodeManager); } catch (Exception e) { throw new RuntimeException("Failed to instantiate class " + policyClass.getCanonicalName() + " for " + e.getMessage()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 7738d0e3907..f402b9309fe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipResponseProto; @@ -51,6 +52,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; @@ -92,6 +96,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; @@ -120,6 +126,7 @@ import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; +import org.apache.hadoop.util.ProtobufUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -460,6 +467,13 @@ public ScmContainerLocationResponse processRequest( .setNodeQueryResponse(queryNode(request.getNodeQueryRequest(), request.getVersion())) .build(); + case SingleNodeQuery: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setSingleNodeQueryResponse(querySingleNode(request + .getSingleNodeQueryRequest())) + .build(); case CloseContainer: return ScmContainerLocationResponse.newBuilder() .setCmdType(request.getCmdType()) @@ -604,6 +618,12 @@ public ScmContainerLocationResponse processRequest( .setDecommissionNodesResponse(decommissionNodes( request.getDecommissionNodesRequest())) .build(); + case GetContainersOnDecomNode: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetContainersOnDecomNodeResponse(getContainersOnDecomNode(request.getGetContainersOnDecomNodeRequest())) + .build(); case RecommissionNodes: return ScmContainerLocationResponse.newBuilder() .setCmdType(request.getCmdType()) @@ -866,6 +886,16 @@ public NodeQueryResponseProto queryNode( .build(); } + public SingleNodeQueryResponseProto querySingleNode( + SingleNodeQueryRequestProto request) + throws IOException { + + HddsProtos.Node datanode = impl.queryNode(ProtobufUtils.fromProtobuf(request.getUuid())); + return SingleNodeQueryResponseProto.newBuilder() + .setDatanode(datanode) + .build(); + } + public SCMCloseContainerResponseProto closeContainer( SCMCloseContainerRequestProto request) throws IOException { @@ -1140,6 +1170,22 @@ public DecommissionNodesResponseProto decommissionNodes( return response.build(); } + public GetContainersOnDecomNodeResponseProto getContainersOnDecomNode(GetContainersOnDecomNodeRequestProto request) + throws IOException { + Map> containerMap = impl.getContainersOnDecomNode( + DatanodeDetails.getFromProtoBuf(request.getDatanodeDetails())); + List containersProtoList = new ArrayList<>(); + for 
(Map.Entry> containerList : containerMap.entrySet()) { + List containerIdsProto = new ArrayList<>(); + for (ContainerID id : containerList.getValue()) { + containerIdsProto.add(id.getProtobuf()); + } + containersProtoList.add(ContainersOnDecomNodeProto.newBuilder().setName(containerList.getKey()) + .addAllId(containerIdsProto).build()); + } + return GetContainersOnDecomNodeResponseProto.newBuilder().addAllContainersOnDecomNode(containersProtoList).build(); + } + public RecommissionNodesResponseProto recommissionNodes( RecommissionNodesRequestProto request) throws IOException { List errors = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java index c1f52914f4f..6b77350cc8c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java @@ -61,7 +61,6 @@ public class SecretKeyManagerService implements SCMService, Runnable { private final ScheduledExecutorService scheduler; - @SuppressWarnings("parameternumber") public SecretKeyManagerService(SCMContext scmContext, ConfigurationSource conf, SCMRatisServer ratisServer) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java index bffddff87b3..2748f9c3a49 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode; import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor.IQueueMetrics; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; /** * Customized queue to handle FCR and ICR from datanode optimally, @@ -159,7 +159,7 @@ private ContainerReport getReport(String uuid) { return null; } - public boolean addValue(@NotNull ContainerReport value) { + public boolean addValue(@Nonnull ContainerReport value) { synchronized (this) { if (remainingCapacity() == 0) { return false; @@ -177,7 +177,7 @@ public boolean addValue(@NotNull ContainerReport value) { } @Override - public boolean add(@NotNull ContainerReport value) { + public boolean add(@Nonnull ContainerReport value) { Objects.requireNonNull(value); synchronized (this) { if (remainingCapacity() == 0) { @@ -189,7 +189,7 @@ public boolean add(@NotNull ContainerReport value) { } @Override - public boolean offer(@NotNull ContainerReport value) { + public boolean offer(@Nonnull ContainerReport value) { Objects.requireNonNull(value); synchronized (this) { return addValue(value); @@ -229,7 +229,7 @@ public ContainerReport peek() { } @Override - public void put(@NotNull ContainerReport value) throws InterruptedException { + public void put(@Nonnull ContainerReport value) throws InterruptedException { Objects.requireNonNull(value); while (!addValue(value)) { Thread.currentThread().sleep(10); @@ -238,7 +238,7 @@ public void put(@NotNull ContainerReport value) throws InterruptedException { @Override 
public boolean offer(ContainerReport value, long timeout, - @NotNull TimeUnit unit) throws InterruptedException { + @Nonnull TimeUnit unit) throws InterruptedException { Objects.requireNonNull(value); long timeoutMillis = unit.toMillis(timeout); while (timeoutMillis > 0) { @@ -253,7 +253,7 @@ public boolean offer(ContainerReport value, long timeout, return false; } - @NotNull + @Nonnull @Override public ContainerReport take() throws InterruptedException { String uuid = orderingQueue.take(); @@ -264,7 +264,7 @@ public ContainerReport take() throws InterruptedException { @Nullable @Override - public ContainerReport poll(long timeout, @NotNull TimeUnit unit) + public ContainerReport poll(long timeout, @Nonnull TimeUnit unit) throws InterruptedException { String uuid = orderingQueue.poll(timeout, unit); synchronized (this) { @@ -286,25 +286,25 @@ public boolean remove(Object o) { } @Override - public boolean containsAll(@NotNull Collection c) { + public boolean containsAll(@Nonnull Collection c) { // no need support this throw new UnsupportedOperationException("not supported"); } @Override - public boolean addAll(@NotNull Collection c) { + public boolean addAll(@Nonnull Collection c) { // no need support this throw new UnsupportedOperationException("not supported"); } @Override - public boolean removeAll(@NotNull Collection c) { + public boolean removeAll(@Nonnull Collection c) { // no need support this throw new UnsupportedOperationException("not supported"); } @Override - public boolean retainAll(@NotNull Collection c) { + public boolean retainAll(@Nonnull Collection c) { // no need support this throw new UnsupportedOperationException("not supported"); } @@ -336,35 +336,35 @@ public boolean contains(Object o) { throw new UnsupportedOperationException("not supported"); } - @NotNull + @Nonnull @Override public Iterator iterator() { // no need support this throw new UnsupportedOperationException("not supported"); } - @NotNull + @Nonnull @Override public Object[] toArray() { // no need support this throw new UnsupportedOperationException("not supported"); } - @NotNull + @Nonnull @Override - public T[] toArray(@NotNull T[] a) { + public T[] toArray(@Nonnull T[] a) { // no need support this throw new UnsupportedOperationException("not supported"); } @Override - public int drainTo(@NotNull Collection c) { + public int drainTo(@Nonnull Collection c) { // no need support this throw new UnsupportedOperationException("not supported"); } @Override - public int drainTo(@NotNull Collection c, + public int drainTo(@Nonnull Collection c, int maxElements) { // no need support this throw new UnsupportedOperationException("not supported"); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3d38fdbe819..13bef8590b7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -109,6 +109,7 @@ import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; @@ -587,6 +588,15 @@ public void 
deleteContainer(long containerID) throws IOException { } } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + try { + return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn); + } catch (NodeNotFoundException e) { + throw new IOException("Failed to get containers list. Unable to find required node", e); + } + } + @Override public List queryNode( HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state, @@ -613,6 +623,27 @@ public List queryNode( return result; } + @Override + public HddsProtos.Node queryNode(UUID uuid) + throws IOException { + HddsProtos.Node result = null; + try { + DatanodeDetails node = scm.getScmNodeManager().getNodeByUuid(uuid); + if (node != null) { + NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node); + result = HddsProtos.Node.newBuilder() + .setNodeID(node.getProtoBufMessage()) + .addNodeStates(ns.getHealth()) + .addNodeOperationalStates(ns.getOperationalState()) + .build(); + } + } catch (NodeNotFoundException e) { + throw new IOException( + "An unexpected error occurred querying the NodeStatus", e); + } + return result; + } + @Override public List decommissionNodes(List nodes) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index 38db618ef53..b6dc6f599bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -130,8 +130,7 @@ public List dispatch(SCMHeartbeatRequestProto heartbeat) { commandQueueReport = heartbeat.getCommandQueueReport(); } // should we dispatch heartbeat through eventPublisher? 
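
A caller-side sketch of the two query APIs added to SCMClientProtocolServer above. The wrapper class, the server and datanode variables, and the Map of String to List of ContainerID shape are assumptions inferred from how the protobuf translator consumes the result, not guaranteed by this patch:

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;

/** Illustrative caller; not part of this patch. */
public final class DecommissionQueryExample {
  private DecommissionQueryExample() { }

  static void report(SCMClientProtocolServer server, DatanodeDetails dn) throws Exception {
    // Single-node query: health state plus operational (decommission) state.
    HddsProtos.Node node = server.queryNode(dn.getUuid());  // null if the UUID is not registered
    if (node != null) {
      System.out.println("Operational state: " + node.getNodeOperationalStates(0));
    }
    // Containers still tracked against this decommissioning node, grouped by reason.
    Map<String, List<ContainerID>> byReason = server.getContainersOnDecomNode(dn);
    byReason.forEach((reason, ids) ->
        System.out.println(reason + ": " + ids.size() + " container(s) on " + dn.getHostName()));
  }
}
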
- commands = nodeManager.processHeartbeat(datanodeDetails, - layoutVersion, commandQueueReport); + commands = nodeManager.processHeartbeat(datanodeDetails, commandQueueReport); if (heartbeat.hasNodeReport()) { LOG.debug("Dispatching Node Report."); eventPublisher.fireEvent( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java index bad326cad1e..40431330d11 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java @@ -83,7 +83,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import static org.apache.hadoop.hdds.scm.ScmUtils.checkIfCertSignRequestAllowed; import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.CERTIFICATE_ID; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 1a3ea2515f2..11fdc0d16d7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -450,7 +450,6 @@ private StorageContainerManager(OzoneConfiguration conf, moveManager = new MoveManager(replicationManager, containerManager); containerReplicaPendingOps.registerSubscriber(moveManager); containerBalancer = new ContainerBalancer(this); - LOG.info(containerBalancer.toString()); // Emit initial safe mode status, as now handlers are registered. scmSafeModeManager.emitSafeModeStatus(); @@ -804,9 +803,9 @@ private void initializeSystemManagers(OzoneConfiguration conf, ScmConfig scmConfig = conf.getObject(ScmConfig.class); pipelineChoosePolicy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, false); + .getPolicy(scmNodeManager, scmConfig, false); ecPipelineChoosePolicy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(scmNodeManager, scmConfig, true); if (configurator.getWritableContainerFactory() != null) { writableContainerFactory = configurator.getWritableContainerFactory(); } else { diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 214a2ad7868..fdd8de15b6a 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -48,6 +48,10 @@

Node Status

HostName + Used Space Percent + Capacity Operational State element.key === "USEDSPACEPERCENT").value, + capacity: value && value.find((element) => element.key === "CAPACITY").value, comstate: value && value.find((element) => element.key === "COMSTATE").value, lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value, port: portSpec.port, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index 6c651cbfacd..634a723f289 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -37,7 +37,9 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -72,9 +74,9 @@ public class TestSCMCommonPlacementPolicy { private OzoneConfiguration conf; @BeforeEach - public void setup() { + void setup(@TempDir File testDir) { nodeManager = new MockNodeManager(true, 10); - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java index 0aa2aacf9d1..754fab6d1b1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java @@ -24,7 +24,6 @@ import java.net.URL; import java.net.URLConnection; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer; import org.apache.hadoop.hdfs.web.URLConnectionFactory; @@ -33,10 +32,10 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -46,8 +45,8 @@ * Test http server os SCM with various HTTP option. 
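
A recurring change in the test classes from here on is replacing GenericTestUtils temp paths and manual FileUtil cleanup with JUnit 5 @TempDir injection. A minimal sketch of the target pattern, with illustrative class and directory names:

import java.io.File;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirPatternExample {

  @TempDir
  static File baseDir;   // injected before @BeforeAll, removed automatically after the class

  @BeforeAll
  static void setUp() {
    // Derive test locations from the injected directory instead of building
    // paths by hand and deleting them in tearDown().
    new File(baseDir, "metadata").mkdirs();
  }

  @Test
  void usesInjectedDirectory() {
    // baseDir stays valid for every test in the class.
  }
}
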
*/ public class TestStorageContainerManagerHttpServer { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName()); + @TempDir + private static File baseDir; private static String keystoresDir; private static String sslConfDir; private static OzoneConfiguration conf; @@ -55,12 +54,10 @@ public class TestStorageContainerManagerHttpServer { @BeforeAll public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - File ozoneMetadataDirectory = new File(BASEDIR, "metadata"); + File ozoneMetadataDirectory = new File(baseDir, "metadata"); ozoneMetadataDirectory.mkdirs(); conf = new OzoneConfiguration(); - keystoresDir = new File(BASEDIR).getAbsolutePath(); + keystoresDir = baseDir.getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir( TestStorageContainerManagerHttpServer.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); @@ -77,7 +74,6 @@ public static void setUp() throws Exception { @AfterAll public static void tearDown() throws Exception { connectionFactory.destroy(); - FileUtil.fullyDelete(new File(BASEDIR)); KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 9d852a15446..6438b6f8d49 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -17,10 +17,10 @@ package org.apache.hadoop.hdds.scm.block; +import java.io.File; import java.io.IOException; import java.time.Clock; import java.time.ZoneId; -import java.nio.file.Path; import java.time.ZoneOffset; import java.util.List; import java.util.Map; @@ -83,7 +83,6 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -114,14 +113,13 @@ public class TestBlockManager { private ReplicationConfig replicationConfig; @BeforeEach - public void setUp(@TempDir Path tempDir) throws Exception { - conf = SCMTestUtils.getConf(); + void setUp(@TempDir File tempDir) throws Exception { + conf = SCMTestUtils.getConf(tempDir); numContainerPerOwnerInPipeline = conf.getInt( ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString()); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 5, TimeUnit.SECONDS); @@ -249,7 +247,7 @@ public void testAllocateBlockWithExclusion() throws Exception { } @Test - public void testAllocateBlockInParallel() { + void testAllocateBlockInParallel() throws Exception { int threadCount = 20; List executors = new ArrayList<>(threadCount); for (int i = 0; i < threadCount; i++) { @@ -273,17 +271,14 @@ public void testAllocateBlockInParallel() { }, executors.get(i)); futureList.add(future); } - try { - CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) - .get(); - } catch (Exception e) { - 
fail("testAllocateBlockInParallel failed"); - } + + CompletableFuture + .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .get(); } @Test - public void testBlockDistribution() throws Exception { + void testBlockDistribution() throws Exception { int threadCount = numContainerPerOwnerInPipeline * numContainerPerOwnerInPipeline; nodeManager.setNumPipelinePerDatanode(1); @@ -323,24 +318,19 @@ public void testBlockDistribution() throws Exception { }, executors.get(i)); futureList.add(future); } - try { - CompletableFuture.allOf(futureList.toArray( - new CompletableFuture[0])).get(); - - assertEquals(1, pipelineManager.getPipelines(replicationConfig).size()); - assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size()); - assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.values().size()); - allocatedBlockMap.values().forEach(v -> { - assertEquals(numContainerPerOwnerInPipeline, v.size()); - }); - } catch (Exception e) { - fail("testAllocateBlockInParallel failed"); - } + CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).get(); + + assertEquals(1, pipelineManager.getPipelines(replicationConfig).size()); + assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size()); + assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.values().size()); + allocatedBlockMap.values().forEach(v -> { + assertEquals(numContainerPerOwnerInPipeline, v.size()); + }); } @Test - public void testBlockDistributionWithMultipleDisks() throws Exception { + void testBlockDistributionWithMultipleDisks() throws Exception { int threadCount = numContainerPerOwnerInPipeline * numContainerPerOwnerInPipeline; nodeManager.setNumHealthyVolumes(numContainerPerOwnerInPipeline); @@ -381,30 +371,26 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { }, executors.get(i)); futureList.add(future); } - try { - CompletableFuture - .allOf(futureList.toArray( - new CompletableFuture[futureList.size()])).get(); - assertEquals(1, - pipelineManager.getPipelines(replicationConfig).size()); - Pipeline pipeline = - pipelineManager.getPipelines(replicationConfig).get(0); - // total no of containers to be created will be number of healthy - // volumes * number of numContainerPerOwnerInPipeline which is equal to - // the thread count - assertEquals(threadCount, pipelineManager.getNumberOfContainers(pipeline.getId())); - assertEquals(threadCount, allocatedBlockMap.size()); - assertEquals(threadCount, allocatedBlockMap.values().size()); - allocatedBlockMap.values().forEach(v -> { - assertEquals(1, v.size()); - }); - } catch (Exception e) { - fail("testAllocateBlockInParallel failed"); - } + CompletableFuture + .allOf(futureList.toArray( + new CompletableFuture[futureList.size()])).get(); + assertEquals(1, + pipelineManager.getPipelines(replicationConfig).size()); + Pipeline pipeline = + pipelineManager.getPipelines(replicationConfig).get(0); + // total no of containers to be created will be number of healthy + // volumes * number of numContainerPerOwnerInPipeline which is equal to + // the thread count + assertEquals(threadCount, pipelineManager.getNumberOfContainers(pipeline.getId())); + assertEquals(threadCount, allocatedBlockMap.size()); + assertEquals(threadCount, allocatedBlockMap.values().size()); + allocatedBlockMap.values().forEach(v -> { + assertEquals(1, v.size()); + }); } @Test - public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { + void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { int 
threadCount = numContainerPerOwnerInPipeline * numContainerPerOwnerInPipeline; int numMetaDataVolumes = 2; @@ -446,25 +432,20 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { }, executors.get(i)); futureList.add(future); } - try { - CompletableFuture - .allOf(futureList.toArray( - new CompletableFuture[futureList.size()])).get(); - assertEquals(1, - pipelineManager.getPipelines(replicationConfig).size()); - Pipeline pipeline = - pipelineManager.getPipelines(replicationConfig).get(0); - // the pipeline per raft log disk config is set to 1 by default - int numContainers = (int)Math.ceil((double) - (numContainerPerOwnerInPipeline * - numContainerPerOwnerInPipeline) / numMetaDataVolumes); - assertEquals(numContainers, pipelineManager. - getNumberOfContainers(pipeline.getId())); - assertEquals(numContainers, allocatedBlockMap.size()); - assertEquals(numContainers, allocatedBlockMap.values().size()); - } catch (Exception e) { - fail("testAllocateBlockInParallel failed"); - } + CompletableFuture + .allOf(futureList.toArray( + new CompletableFuture[futureList.size()])).get(); + assertEquals(1, + pipelineManager.getPipelines(replicationConfig).size()); + Pipeline pipeline = + pipelineManager.getPipelines(replicationConfig).get(0); + // the pipeline per raft log disk config is set to 1 by default + int numContainers = (int)Math.ceil((double) + (numContainerPerOwnerInPipeline * + numContainerPerOwnerInPipeline) / numMetaDataVolumes); + assertEquals(numContainers, pipelineManager.getNumberOfContainers(pipeline.getId())); + assertEquals(numContainers, allocatedBlockMap.size()); + assertEquals(numContainers, allocatedBlockMap.values().size()); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index be57aa8ea6a..03500529ff9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.block; -import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -54,10 +54,10 @@ import org.apache.hadoop.ozone.protocol.commands.CommandStatus; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -70,7 +70,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; @@ -81,7 +80,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; 
import static org.mockito.Mockito.mock; @@ -95,6 +94,7 @@ public class TestDeletedBlockLog { private DeletedBlockLogImpl deletedBlockLog; private static final int BLOCKS_PER_TXN = 5; private OzoneConfiguration conf; + @TempDir private File testDir; private ContainerManager containerManager; private Table containerTable; @@ -111,8 +111,6 @@ public class TestDeletedBlockLog { @BeforeEach public void setup() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestDeletedBlockLog.class.getSimpleName()); conf = new OzoneConfiguration(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); @@ -128,12 +126,9 @@ public void setup() throws Exception { new SCMHADBTransactionBufferStub(scm.getScmMetadataStore().getStore()); metrics = mock(ScmBlockDeletingServiceMetrics.class); deletedBlockLog = new DeletedBlockLogImpl(conf, + scm, containerManager, - scm.getScmHAManager().getRatisServer(), - scm.getScmMetadataStore().getDeletedBlocksTXTable(), scmHADBTransactionBuffer, - scm.getScmContext(), - scm.getSequenceIdGen(), metrics); dnList = new ArrayList<>(3); setupContainerManager(); @@ -208,7 +203,6 @@ public void tearDown() throws Exception { deletedBlockLog.close(); scm.stop(); scm.join(); - FileUtils.deleteDirectory(testDir); } private Map> generateData(int dataSize) throws IOException { @@ -218,9 +212,8 @@ private Map> generateData(int dataSize) throws IOException { private Map> generateData(int dataSize, HddsProtos.LifeCycleState state) throws IOException { Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); + int continerIDBase = RandomUtils.nextInt(0, 100); + int localIDBase = RandomUtils.nextInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID, state); @@ -692,13 +685,12 @@ public void testInadequateReplicaCommit() throws Exception { @Test public void testRandomOperateTransactions() throws Exception { mockContainerHealthResult(true); - Random random = new Random(); int added = 0, committed = 0; List blocks = new ArrayList<>(); List txIDs; // Randomly add/get/commit/increase transactions. for (int i = 0; i < 100; i++) { - int state = random.nextInt(4); + int state = RandomUtils.nextInt(0, 4); if (state == 0) { addTransactions(generateData(10), true); added += 10; @@ -736,12 +728,9 @@ public void testPersistence() throws Exception { // transactions are stored persistently. 
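
These TestDeletedBlockLog changes also swap java.util.Random for commons-lang3 RandomUtils when generating container and block IDs. For reference, the bounds are start-inclusive and end-exclusive (the example class name is illustrative):

import org.apache.commons.lang3.RandomUtils;

public class RandomUtilsBoundsExample {
  public static void main(String[] args) {
    int containerIdBase = RandomUtils.nextInt(0, 100);   // 0 <= value < 100
    int localIdBase = RandomUtils.nextInt(0, 1000);      // 0 <= value < 1000
    long localId = RandomUtils.nextLong();               // non-negative long
    System.out.println(containerIdBase + " " + localIdBase + " " + localId);
  }
}
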
deletedBlockLog.close(); deletedBlockLog = new DeletedBlockLogImpl(conf, + scm, containerManager, - scm.getScmHAManager().getRatisServer(), - scm.getScmMetadataStore().getDeletedBlocksTXTable(), scmHADBTransactionBuffer, - scm.getScmContext(), - scm.getSequenceIdGen(), metrics); List blocks = getTransactions(10 * BLOCKS_PER_TXN * THREE); @@ -755,12 +744,9 @@ public void testPersistence() throws Exception { // currentTxnID = 50 deletedBlockLog.close(); new DeletedBlockLogImpl(conf, + scm, containerManager, - scm.getScmHAManager().getRatisServer(), - scm.getScmMetadataStore().getDeletedBlocksTXTable(), scmHADBTransactionBuffer, - scm.getScmContext(), - scm.getSequenceIdGen(), metrics); blocks = getTransactions(40 * BLOCKS_PER_TXN * THREE); assertEquals(0, blocks.size()); @@ -803,8 +789,7 @@ public void testDeletedBlockTransactions() // add two transactions for same container containerID = blocks.get(0).getContainerID(); Map> deletedBlocksMap = new HashMap<>(); - Random random = new Random(); - long localId = random.nextLong(); + long localId = RandomUtils.nextLong(); deletedBlocksMap.put(containerID, new LinkedList<>( Collections.singletonList(localId))); addTransactions(deletedBlocksMap, true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java index 3bd7ad00f6a..1fd6a2277c7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java @@ -49,10 +49,10 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anySet; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 794dedceef0..84f3684ab7c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -772,13 +772,11 @@ private synchronized void addEntryTodnsToUuidMap( * Send heartbeat to indicate the datanode is alive and doing well. * * @param datanodeDetails - Datanode ID. 
- * @param layoutInfo - DataNode Layout info * @param commandQueueReportProto - Command Queue Report Proto * @return SCMheartbeat response list */ @Override public List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutInfo, CommandQueueReportProto commandQueueReportProto) { return null; } @@ -786,7 +784,7 @@ public List processHeartbeat(DatanodeDetails datanodeDetails, @Override public Boolean isNodeRegistered( DatanodeDetails datanodeDetails) { - return false; + return healthyNodes.contains(datanodeDetails); } @Override diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java index 2bd13d4489e..9649159de3f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java @@ -428,7 +428,6 @@ public RegisteredCommand register(DatanodeDetails datanodeDetails, @Override public List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutInfo, CommandQueueReportProto commandQueueReportProto) { return null; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index 97fda58163b..6891d316142 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -52,10 +52,10 @@ import static org.mockito.Mockito.when; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.verify; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index a5150f3c952..25a4a80f233 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -21,11 +21,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -45,10 +42,10 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -66,6 +63,7 @@ */ public class TestContainerManagerImpl { + @TempDir private File testDir; private DBStore dbStore; private ContainerManager containerManager; @@ -75,11 +73,8 @@ public class TestContainerManagerImpl { private ContainerReplicaPendingOps pendingOpsMock; @BeforeEach - public void setUp() throws Exception { - final OzoneConfiguration conf = SCMTestUtils.getConf(); - testDir = GenericTestUtils.getTestDir( - TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + void setUp() throws Exception { + final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); @@ -105,8 +100,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index 53512528a0d..695c88d11a3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -16,8 +16,6 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -48,10 +46,10 @@ import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -63,7 +61,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -89,20 +86,17 @@ public class TestContainerReportHandler { private ContainerManager containerManager; private ContainerStateManager containerStateManager; private EventPublisher publisher; + @TempDir private File testDir; private DBStore dbStore; private SCMHAManager scmhaManager; private PipelineManager pipelineManager; @BeforeEach - public void setup() throws IOException, InvalidStateTransitionException, - TimeoutException { - final OzoneConfiguration conf = SCMTestUtils.getConf(); + void setup() throws IOException, InvalidStateTransitionException { + final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); nodeManager = new MockNodeManager(true, 10); containerManager = mock(ContainerManager.class); - testDir = GenericTestUtils.getTestDir( - TestContainerReportHandler.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, 
testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); @@ -165,8 +159,6 @@ public void tearDown() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } private void testReplicaIndexUpdate(ContainerInfo container, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index c3dd608ab28..27505c6dd3b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -23,10 +23,8 @@ import java.time.ZoneId; import java.util.ArrayList; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -46,10 +44,10 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; @@ -64,6 +62,7 @@ public class TestContainerStateManager { private ContainerStateManager containerStateManager; private PipelineManager pipelineManager; private SCMHAManager scmhaManager; + @TempDir private File testDir; private DBStore dbStore; private Pipeline pipeline; @@ -72,8 +71,6 @@ public class TestContainerStateManager { public void init() throws IOException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); scmhaManager = SCMHAManagerStub.getInstance(true); - testDir = GenericTestUtils.getTestDir( - TestContainerStateManager.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); @@ -106,8 +103,6 @@ public void tearDown() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index 8cbfdd9c788..dbcccce598c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -55,10 +54,10 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -70,7 +69,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -106,6 +104,7 @@ public class TestIncrementalContainerReportHandler { private HDDSLayoutVersionManager versionManager; private SCMContext scmContext = SCMContext.emptyContext(); private PipelineManager pipelineManager; + @TempDir private File testDir; private DBStore dbStore; private SCMHAManager scmhaManager; @@ -114,9 +113,7 @@ public class TestIncrementalContainerReportHandler { public void setup() throws IOException, InvalidStateTransitionException, TimeoutException { final OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = Paths.get(testDir.getPath(), "scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); this.containerManager = mock(ContainerManager.class); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); @@ -129,9 +126,6 @@ public void setup() throws IOException, InvalidStateTransitionException, new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager); scmhaManager = SCMHAManagerStub.getInstance(true); - testDir = GenericTestUtils.getTestDir( - TestIncrementalContainerReportHandler.class.getSimpleName() - + UUID.randomUUID()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); @@ -200,8 +194,6 @@ public void tearDown() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 72df033ae81..9ea4ea45b56 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -27,10 +27,7 @@ import java.io.File; import java.io.IOException; import java.util.Iterator; -import java.util.UUID; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -55,10 +52,10 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * Test container deletion behaviour of unknown containers @@ -71,18 +68,16 @@ public class TestUnknownContainerReport { private ContainerStateManager containerStateManager; private EventPublisher publisher; private 
PipelineManager pipelineManager; + @TempDir private File testDir; private DBStore dbStore; private SCMHAManager scmhaManager; @BeforeEach public void setup() throws IOException { - final OzoneConfiguration conf = SCMTestUtils.getConf(); + final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = mock(ContainerManager.class); - testDir = GenericTestUtils.getTestDir( - TestUnknownContainerReport.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); @@ -107,8 +102,6 @@ public void tearDown() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 3b1d4db0659..3bed3878123 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -1048,7 +1047,6 @@ public void checkIterationResultException() stopBalancer(); } - @Unhealthy("HDDS-8941") @Test public void testDelayedStart() throws InterruptedException, TimeoutException { conf.setTimeDuration("hdds.scm.wait.time.after.safemode.exit", 10, @@ -1066,7 +1064,7 @@ public void testDelayedStart() throws InterruptedException, TimeoutException { This is the delay before it starts balancing. 
*/ GenericTestUtils.waitFor( - () -> balancingThread.getState() == Thread.State.TIMED_WAITING, 1, 20); + () -> balancingThread.getState() == Thread.State.TIMED_WAITING, 1, 40); assertEquals(Thread.State.TIMED_WAITING, balancingThread.getState()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 03ba2c54845..34678a301eb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -43,7 +43,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 39e19135efa..cbe513eef82 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -19,9 +19,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Random; import java.util.stream.IntStream; +import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.StringUtils; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -48,8 +50,6 @@ import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import org.apache.commons.lang3.StringUtils; - import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; @@ -64,8 +64,8 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -356,12 +356,11 @@ public void testNoFallback(int datanodeCount) { setup(datanodeCount); // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. 
int nodeNum = 5; - try { - policyNoFallback.chooseDatanodes(null, null, nodeNum, 0, 15); - fail("Fallback prohibited, this call should fail"); - } catch (Exception e) { - assertEquals("SCMException", e.getClass().getSimpleName()); - } + Exception e = + assertThrows(Exception.class, + () -> policyNoFallback.chooseDatanodes(null, null, nodeNum, 0, 15), + "Fallback prohibited, this call should fail"); + assertEquals("SCMException", e.getClass().getSimpleName()); // get metrics long totalRequest = metrics.getDatanodeRequestCount(); @@ -425,13 +424,12 @@ public void testNoInfiniteLoop(int datanodeCount) { setup(datanodeCount); int nodeNum = 1; - try { - // request storage space larger than node capability - policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15); - fail("Storage requested exceeds capacity, this call should fail"); - } catch (Exception e) { - assertEquals("SCMException", e.getClass().getSimpleName()); - } + // request storage space larger than node capability + Exception e = + assertThrows(Exception.class, + () -> policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15), + "Storage requested exceeds capacity, this call should fail"); + assertEquals("SCMException", e.getClass().getSimpleName()); // get metrics long totalRequest = metrics.getDatanodeRequestCount(); @@ -625,7 +623,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) { for (int i = 0; i < 10; i++) { // Set a random DN to in_service and ensure it is always picked - int index = new Random().nextInt(dnInfos.size()); + int index = RandomUtils.nextInt(0, dnInfos.size()); dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); try { List datanodeDetails = @@ -830,12 +828,11 @@ public void testNoFallbackWithUsedNodes(int datanodeCount) { // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. 
int nodeNum = 5; - try { - policyNoFallback.chooseDatanodes(usedNodes, null, null, nodeNum, 0, 15); - fail("Fallback prohibited, this call should fail"); - } catch (Exception e) { - assertEquals("SCMException", e.getClass().getSimpleName()); - } + Exception e = + assertThrows(Exception.class, + () -> policyNoFallback.chooseDatanodes(usedNodes, null, null, nodeNum, 0, 15), + "Fallback prohibited, this call should fail"); + assertEquals("SCMException", e.getClass().getSimpleName()); // get metrics long totalRequest = metrics.getDatanodeRequestCount(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java index 5bf59b27b8c..faccfa67a58 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java @@ -70,7 +70,6 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -456,14 +455,12 @@ public void chooseNodeWithFavoredNodes(int datanodeCount) public void testNoInfiniteLoop(int datanodeCount) { setup(datanodeCount); int nodeNum = 1; - - try { - // request storage space larger than node capability - policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15); - fail("Storage requested exceeds capacity, this call should fail"); - } catch (Exception e) { - assertEquals("SCMException", e.getClass().getSimpleName()); - } + // request storage space larger than node capability + Exception e = + assertThrows(Exception.class, + () -> policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15), + "Storage requested exceeds capacity, this call should fail"); + assertEquals("SCMException", e.getClass().getSimpleName()); // get metrics long totalRequest = metrics.getDatanodeRequestCount(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java index 21f756f9a0b..29dd24f11ca 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java @@ -24,7 +24,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; /** * Tests for DatanodeCommandCountUpdatedHandler. 
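// A minimal, self-contained sketch of the try/catch-plus-fail() to assertThrows migration
// that the placement-policy hunks above apply. The class and interface below are hypothetical
// stand-ins; only the JUnit 5 assertion API is real.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {

  /** Hypothetical stand-in for the placement policy under test. */
  interface PlacementPolicySketch {
    void chooseDatanodes(int nodeNum) throws Exception;
  }

  private final PlacementPolicySketch policyNoFallback = nodeNum -> {
    throw new Exception("fallback prohibited");
  };

  @Test
  void fallbackProhibitedCallFails() {
    // assertThrows executes the lambda, verifies the exception type, and returns the caught
    // instance, so follow-up assertions on it move outside the lambda instead of a catch block.
    Exception e = assertThrows(Exception.class,
        () -> policyNoFallback.chooseDatanodes(5),
        "Fallback prohibited, this call should fail");
    assertEquals("fallback prohibited", e.getMessage());
  }
}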
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java index 6f320830334..ff0b838bd8b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -524,7 +524,7 @@ public void testMissingNonMaintenanceReplicasPendingAdd() { assertEquals(0, rcnt.unavailableIndexes(true).size()); } - @NotNull + @Nonnull private List getContainerReplicaOps( List addIndexes, List deleteIndexes) { List pending = new ArrayList<>(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java index b0a77f3a7c0..73f6edb468e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java @@ -31,9 +31,11 @@ import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; @@ -62,10 +64,10 @@ public class TestECMisReplicationHandler extends TestMisReplicationHandler { @BeforeEach - public void setup() throws NodeNotFoundException, + void setup(@TempDir File testDir) throws NodeNotFoundException, CommandTargetOverloadedException, NotLeaderException { ECReplicationConfig repConfig = new ECReplicationConfig(DATA, PARITY); - setup(repConfig); + setup(repConfig, testDir); } @ParameterizedTest diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java index 7021b956250..50cead87e03 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java @@ -41,8 +41,10 @@ import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.mockito.stubbing.Answer; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -63,10 +65,10 @@ import static org.assertj.core.api.Assertions.assertThat; 
import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.doAnswer; @@ -82,7 +84,7 @@ public class TestECOverReplicationHandler { private Set>> commandsSent; @BeforeEach - public void setup() throws NodeNotFoundException, NotLeaderException, + void setup(@TempDir File testDir) throws NodeNotFoundException, NotLeaderException, CommandTargetOverloadedException { staleNode = null; @@ -103,7 +105,7 @@ public void setup() throws NodeNotFoundException, NotLeaderException, commandsSent); NodeManager nodeManager = new MockNodeManager(true, 10); - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(testDir); ECReplicationConfig repConfig = new ECReplicationConfig(3, 2); container = ReplicationTestUtil .createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig); @@ -311,13 +313,8 @@ public void testDeleteThrottling() throws IOException { ECOverReplicationHandler ecORH = new ECOverReplicationHandler(policy, replicationManager); - try { - ecORH.processAndSendCommands(availableReplicas, ImmutableList.of(), - health, 1); - fail("Expected CommandTargetOverloadedException"); - } catch (CommandTargetOverloadedException e) { - // This is expected. 
- } + assertThrows(CommandTargetOverloadedException.class, + () -> ecORH.processAndSendCommands(availableReplicas, ImmutableList.of(), health, 1)); assertEquals(1, commandsSent.size()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java index 0d09e26b27c..22c3630e0c6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java @@ -49,9 +49,11 @@ import org.assertj.core.util.Lists; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -86,16 +88,16 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -125,7 +127,7 @@ public class TestECUnderReplicationHandler { = new AtomicBoolean(false); @BeforeEach - public void setup() throws NodeNotFoundException, + void setup(@TempDir File testDir) throws NodeNotFoundException, CommandTargetOverloadedException, NotLeaderException { nodeManager = new MockNodeManager(true, 10) { @Override @@ -159,7 +161,7 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) { replicationManager, commandsSent, throwOverloadedExceptionOnReconstruction); - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); repConfig = new ECReplicationConfig(DATA, PARITY); container = createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig); policy = ReplicationTestUtil diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java index 1dcf15ed65b..8aac64de702 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java @@ -77,10 +77,12 @@ import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.time.Clock; import java.time.Instant; @@ -155,15 +157,18 @@ public class TestLegacyReplicationManager { private DBStore dbStore; private ContainerReplicaPendingOps containerReplicaPendingOps; + @TempDir + private File tempDir; + int getInflightCount(InflightType type) { return replicationManager.getLegacyReplicationManager() .getInflightCount(type); } @BeforeEach - void setup() throws IOException, InterruptedException, + void setup(@TempDir File testDir) throws IOException, InterruptedException, NodeNotFoundException, InvalidStateTransitionException { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(testDir); conf.setTimeDuration( HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS); @@ -260,10 +265,10 @@ private void createReplicationManager(ReplicationManagerConfiguration rmConf) createReplicationManager(rmConf, null); } - void createReplicationManager(ReplicationManagerConfiguration rmConf, + private void createReplicationManager(ReplicationManagerConfiguration rmConf, LegacyReplicationManagerConfiguration lrmConf) throws InterruptedException, IOException { - OzoneConfiguration config = SCMTestUtils.getConf(); + OzoneConfiguration config = SCMTestUtils.getConf(tempDir); config.setTimeDuration( HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java index 571c79b4f52..7746b1db621 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; +import java.io.File; import java.io.IOException; import java.util.HashSet; import java.util.List; @@ -56,15 +57,14 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; /** * Tests the MisReplicationHandling functionalities to test implementations. 
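// A minimal sketch of the JUnit 5 @TempDir injection these hunks switch to: the Jupiter
// extension creates a per-test directory and deletes it afterwards, which is why the
// GenericTestUtils.getTestDir(...) setup and FileUtil.fullyDelete(testDir) teardown above
// can be dropped. Names below are hypothetical; only the JUnit 5 annotations are real.
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.File;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketch {

  // Field injection: a fresh directory for every test method, cleaned up automatically.
  @TempDir
  File testDir;

  private File metadataDir;

  // Parameter injection works on @BeforeEach methods too; the concrete handler tests use this
  // form and pass the injected File down to the shared setup(ReplicationConfig, File) overload.
  @BeforeEach
  void setUp(@TempDir File scratch) {
    metadataDir = new File(scratch, "scm-meta");
    assertTrue(metadataDir.mkdirs());
  }

  @Test
  void injectedDirectoriesExist() {
    assertTrue(testDir.isDirectory());
    assertTrue(metadataDir.isDirectory());
  }
}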
@@ -79,10 +79,10 @@ public abstract class TestMisReplicationHandler { new AtomicBoolean(false); private ReplicationManagerMetrics metrics; - protected void setup(ReplicationConfig repConfig) + protected void setup(ReplicationConfig repConfig, File testDir) throws NodeNotFoundException, CommandTargetOverloadedException, NotLeaderException { - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); replicationManager = mock(ReplicationManager.class); when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java index 9430ad6f757..d69f0cd7554 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java @@ -32,9 +32,11 @@ import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; @@ -59,11 +61,11 @@ public class TestRatisMisReplicationHandler extends TestMisReplicationHandler { @BeforeEach - public void setup() throws NodeNotFoundException, + void setup(@TempDir File testDir) throws NodeNotFoundException, CommandTargetOverloadedException, NotLeaderException { RatisReplicationConfig repConfig = RatisReplicationConfig .getInstance(ReplicationFactor.THREE); - setup(repConfig); + setup(repConfig, testDir); } @ParameterizedTest diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java index f4476e6df54..cfb3952d133 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java @@ -59,10 +59,10 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.eq; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.argThat; @@ -441,13 +441,9 @@ public void testDeleteThrottlingMisMatchedReplica() throws IOException { RatisOverReplicationHandler handler = new RatisOverReplicationHandler(policy, replicationManager); - try { - handler.processAndSendCommands(replicas, Collections.emptyList(), - getOverReplicatedHealthResult(), 2); - 
fail("Expected CommandTargetOverloadedException"); - } catch (CommandTargetOverloadedException e) { - // Expected - } + assertThrows(CommandTargetOverloadedException.class, + () -> handler.processAndSendCommands(replicas, Collections.emptyList(), + getOverReplicatedHealthResult(), 2)); assertEquals(1, commandsSent.size()); Pair> cmd = commandsSent.iterator().next(); assertNotEquals(quasiClosedReplica.getDatanodeDetails(), diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java index ca86cb689fb..d77f093abe3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java @@ -39,12 +39,12 @@ import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -67,9 +67,9 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.eq; @@ -91,13 +91,13 @@ public class TestRatisUnderReplicationHandler { private ReplicationManagerMetrics metrics; @BeforeEach - public void setup() throws NodeNotFoundException, + void setup(@TempDir File testDir) throws NodeNotFoundException, CommandTargetOverloadedException, NotLeaderException { container = ReplicationTestUtil.createContainer( HddsProtos.LifeCycleState.CLOSED, RATIS_REPLICATION_CONFIG); nodeManager = mock(NodeManager.class); - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); policy = ReplicationTestUtil .getSimpleTestPlacementPolicy(nodeManager, conf); replicationManager = mock(ReplicationManager.class); @@ -605,11 +605,11 @@ public void testUnderReplicationWithVulnerableReplicasOnUniqueOrigins() throws I DECOMMISSIONING, State.UNHEALTHY, sequenceID); replicas.add(unhealthyReplica); UnderReplicatedHealthResult result = getUnderReplicatedHealthResult(); - Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true); + when(result.hasVulnerableUnhealthy()).thenReturn(true); final Set>> commands = testProcessing(replicas, Collections.emptyList(), result, 2, 1); - Assertions.assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey()); + assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey()); } /** diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index fe1cdcc0695..47844f32fb0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -43,6 +44,9 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.token.ContainerTokenGenerator; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; @@ -56,7 +60,6 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; import java.io.IOException; import java.time.Instant; @@ -175,6 +178,16 @@ public void setup() throws IOException { // Ensure that RM will run when asked. 
when(scmContext.isLeaderReady()).thenReturn(true); when(scmContext.isInSafeMode()).thenReturn(false); + + PipelineManager pipelineManager = mock(PipelineManager.class); + when(pipelineManager.getPipeline(any())) + .thenReturn(HddsTestUtils.getRandomPipeline()); + + StorageContainerManager scm = mock(StorageContainerManager.class); + when(scm.getPipelineManager()).thenReturn(pipelineManager); + when(scm.getContainerTokenGenerator()).thenReturn(ContainerTokenGenerator.DISABLED); + + when(scmContext.getScm()).thenReturn(scm); } private ReplicationManager createReplicationManager() throws IOException { @@ -530,7 +543,7 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit ContainerReplicaProto.State.UNHEALTHY); replicas.add(unhealthy); storeContainerAndReplicas(container, replicas); - Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) + when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) .thenAnswer(invocation -> { DatanodeDetails dn = invocation.getArgument(0); if (dn.equals(unhealthy.getDatanodeDetails())) { @@ -550,9 +563,9 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit assertEquals(0, repQueue.overReplicatedQueueSize()); // next, this test sets up some mocks to test if RatisUnderReplicationHandler will handle this container correctly - Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), + when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails())); - Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) + when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) .thenAnswer(invocation -> { Map map = new HashMap<>(); map.put(SCMCommandProto.Type.replicateContainerCommand, 0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java index 723828a44bb..437fdc1c06f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java @@ -67,7 +67,7 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java index a950008ec9f..dec61610d1e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java @@ -24,17 +24,20 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import 
org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; import java.util.Collections; import java.util.Set; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.mock; @@ -58,6 +61,7 @@ public void setup() { ratisReplicationConfig = RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE); replicationManager = mock(ReplicationManager.class); + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(true); openContainerHandler = new OpenContainerHandler(replicationManager); } @@ -119,8 +123,36 @@ public void testOpenUnhealthyContainerIsClosed() { assertTrue(openContainerHandler.handle(readRequest)); verify(replicationManager, times(1)) .sendCloseContainerEvent(containerInfo.containerID()); + assertEquals(1, request.getReport().getStat(HealthState.OPEN_UNHEALTHY)); } + @Test + public void testOpenContainerWithoutPipelineIsClosed() { + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false); + ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo( + ecReplicationConfig, 1, OPEN); + Set containerReplicas = ReplicationTestUtil + .createReplicas(containerInfo.containerID(), + ContainerReplicaProto.State.OPEN, 1, 2, 3, 4); + ContainerCheckRequest request = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .build(); + ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .setReadOnly(true) + .build(); + assertTrue(openContainerHandler.handle(request)); + assertTrue(openContainerHandler.handle(readRequest)); + verify(replicationManager, times(1)) + .sendCloseContainerEvent(containerInfo.containerID()); + assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE)); + } @Test public void testClosedRatisContainerReturnsFalse() { ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo( @@ -178,5 +210,33 @@ public void testOpenUnhealthyRatisContainerIsClosed() { assertTrue(openContainerHandler.handle(request)); assertTrue(openContainerHandler.handle(readRequest)); verify(replicationManager, times(1)).sendCloseContainerEvent(any()); + assertEquals(1, request.getReport().getStat(HealthState.OPEN_UNHEALTHY)); + } + + @Test + public void testOpenRatisContainerWithoutPipelineIsClosed() { + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false); + ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo( + ratisReplicationConfig, 1, OPEN); + 
Set containerReplicas = ReplicationTestUtil + .createReplicas(containerInfo.containerID(), + ContainerReplicaProto.State.OPEN, 0, 0, 0); + ContainerCheckRequest request = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .build(); + ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .setReadOnly(true) + .build(); + assertTrue(openContainerHandler.handle(request)); + assertTrue(openContainerHandler.handle(readRequest)); + verify(replicationManager, times(1)).sendCloseContainerEvent(any()); + assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java index 8fa4c974e1b..28eccd5211c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.util.Collections; import java.util.HashSet; @@ -190,7 +189,7 @@ public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplicaWhenAllRe ContainerReplica unhealthy = createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId); replicas.add(unhealthy); - Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) .thenAnswer(invocation -> { DatanodeDetails dn = invocation.getArgument(0); if (dn.equals(unhealthy.getDatanodeDetails())) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java index a3346da970f..dfb3ff5179e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java @@ -55,8 +55,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.security.x509.CertificateTestUtils.createSelfSignedCert; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java 
index 29fa47135fc..a5a2054a8ae 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java @@ -35,7 +35,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests on {@link org.apache.hadoop.hdds.scm.metadata.Replicate}. @@ -125,13 +125,10 @@ public void testReplicateAnnotationBasic() throws Throwable { SCMHAInvocationHandler.class.getClassLoader(), new Class[]{ContainerStateManager.class}, scmhaInvocationHandler); - - try { - proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance()); - fail("Cannot reach here: should have seen a IOException"); - } catch (IOException e) { - assertNotNull(e.getMessage()); - assertThat(e.getMessage()).contains("submitRequest is called"); - } + IOException e = + assertThrows(IOException.class, + () -> proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance())); + assertNotNull(e.getMessage()); + assertThat(e.getMessage()).contains("submitRequest is called"); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java index 642fbd635a3..757a0ab0dce 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java @@ -19,20 +19,18 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; -import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.fail; /** * Test for SCMContext. 
*/ public class TestSCMContext { @Test - public void testRaftOperations() { + void testRaftOperations() throws Exception { // start as follower SCMContext scmContext = new SCMContext.Builder() .setLeader(false).setTerm(0).buildMaybeInvalid(); @@ -44,11 +42,8 @@ public void testRaftOperations() { scmContext.setLeaderReady(); assertTrue(scmContext.isLeader()); assertTrue(scmContext.isLeaderReady()); - try { - assertEquals(scmContext.getTermOfLeader(), 10); - } catch (NotLeaderException e) { - fail("Should not throw nle."); - } + assertEquals(scmContext.getTermOfLeader(), 10); + // step down scmContext.updateLeaderAndTerm(false, 0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java index 54a422b909b..75a943ee8da 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java @@ -29,7 +29,6 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.ha.ConfUtils; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.server.RaftServerConfigKeys; import org.apache.ratis.util.TimeDuration; @@ -75,9 +74,11 @@ */ class TestSCMHAConfiguration { private OzoneConfiguration conf; + @TempDir + private File tempDir; @BeforeEach - void setup(@TempDir File tempDir) { + void setup() { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); DefaultConfigManager.clearDefaultConfigs(); @@ -214,8 +215,7 @@ public void testSCMHAConfig() throws Exception { assertEquals(0, scmRatisConfig.getLogAppenderWaitTimeMin(), "getLogAppenderWaitTimeMin"); - final File testDir = GenericTestUtils.getRandomizedTestDir(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getPath()); final RaftProperties p = RatisUtil.newRaftProperties(conf); final TimeDuration t = RaftServerConfigKeys.Log.Appender.waitTimeMin(p); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java index eb0f18ae019..f33eedf9695 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java @@ -45,9 +45,13 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.DivisionInfo; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -55,6 +59,7 @@ import java.util.concurrent.TimeoutException; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; import static org.junit.jupiter.api.Assertions.assertEquals; import 
static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; @@ -64,27 +69,34 @@ /** * Test cases to verify {@link org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl}. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) class TestSCMHAManagerImpl { - @TempDir + private static final String FOLLOWER_SCM_ID = "follower"; + private Path storageBaseDir; private String clusterID; private SCMHAManager primarySCMHAManager; + private SCMRatisServer follower; - @BeforeEach - void setup() throws IOException, InterruptedException, + @BeforeAll + void setup(@TempDir Path tempDir) throws IOException, InterruptedException, TimeoutException { + storageBaseDir = tempDir; clusterID = UUID.randomUUID().toString(); OzoneConfiguration conf = getConfig("scm1", 9894); final StorageContainerManager scm = getMockStorageContainerManager(conf); SCMRatisServerImpl.initialize(clusterID, scm.getScmId(), scm.getScmNodeDetails(), conf); - scm.getScmHAManager().start(); primarySCMHAManager = scm.getScmHAManager(); + primarySCMHAManager.start(); final DivisionInfo ratisDivision = primarySCMHAManager.getRatisServer() .getDivision().getInfo(); // Wait for Ratis Server to be ready waitForSCMToBeReady(ratisDivision); + follower = getMockStorageContainerManager(getConfig(FOLLOWER_SCM_ID, 9898)) + .getScmHAManager().getRatisServer(); } private OzoneConfiguration getConfig(String scmId, int ratisPort) { @@ -97,42 +109,55 @@ private OzoneConfiguration getConfig(String scmId, int ratisPort) { return conf; } - public void waitForSCMToBeReady(DivisionInfo ratisDivision) + private void waitForSCMToBeReady(DivisionInfo ratisDivision) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(ratisDivision::isLeaderReady, 1000, 10000); } - @AfterEach - public void cleanup() throws IOException { + @AfterAll + void cleanup() throws IOException { + follower.stop(); primarySCMHAManager.stop(); } @Test - public void testAddSCM() throws IOException, InterruptedException { - assertEquals(1, primarySCMHAManager.getRatisServer() - .getDivision().getGroup().getPeers().size()); + @Order(1) + void testAddSCM() throws IOException { + assertEquals(1, getPeerCount()); + + follower.start(); + final AddSCMRequest request = new AddSCMRequest( + clusterID, FOLLOWER_SCM_ID, getFollowerAddress()); + primarySCMHAManager.addSCM(request); + assertEquals(2, getPeerCount()); + } - final StorageContainerManager scm2 = getMockStorageContainerManager( - getConfig("scm2", 9898)); - try { - scm2.getScmHAManager().getRatisServer().start(); - final AddSCMRequest request = new AddSCMRequest( - clusterID, scm2.getScmId(), - "localhost:" + scm2.getScmHAManager().getRatisServer() - .getDivision().getRaftServer().getServerRpc() - .getInetSocketAddress().getPort()); - primarySCMHAManager.addSCM(request); - assertEquals(2, primarySCMHAManager.getRatisServer() - .getDivision().getGroup().getPeers().size()); - } finally { - scm2.getScmHAManager().getRatisServer().stop(); - } + @Test + @Order(2) // requires testAddSCM + void testRemoveSCM() throws IOException { + assumeThat(getPeerCount()).isEqualTo(2); + + final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest( + clusterID, FOLLOWER_SCM_ID, getFollowerAddress()); + primarySCMHAManager.removeSCM(removeSCMRequest); + assertEquals(1, getPeerCount()); + } + + private int getPeerCount() { + return primarySCMHAManager.getRatisServer() + .getDivision().getGroup().getPeers().size(); + } + + private String getFollowerAddress() { + 
return "localhost:" + + follower.getDivision() + .getRaftServer().getServerRpc().getInetSocketAddress().getPort(); } @Test - public void testHARingRemovalErrors() throws IOException, + void testHARingRemovalErrors() throws IOException, AuthenticationException { OzoneConfiguration config = new OzoneConfiguration(); config.set(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY, "scm1"); @@ -160,35 +185,6 @@ public void testHARingRemovalErrors() throws IOException, scm2.getScmHAManager().getRatisServer().stop(); } } - @Test - public void testRemoveSCM() throws IOException, InterruptedException { - assertEquals(1, primarySCMHAManager.getRatisServer() - .getDivision().getGroup().getPeers().size()); - - final StorageContainerManager scm2 = getMockStorageContainerManager( - getConfig("scm2", 9898)); - try { - scm2.getScmHAManager().getRatisServer().start(); - final AddSCMRequest addSCMRequest = new AddSCMRequest( - clusterID, scm2.getScmId(), - "localhost:" + scm2.getScmHAManager().getRatisServer() - .getDivision().getRaftServer().getServerRpc() - .getInetSocketAddress().getPort()); - primarySCMHAManager.addSCM(addSCMRequest); - assertEquals(2, primarySCMHAManager.getRatisServer() - .getDivision().getGroup().getPeers().size()); - - final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest( - clusterID, scm2.getScmId(), "localhost:" + - scm2.getScmHAManager().getRatisServer().getDivision() - .getRaftServer().getServerRpc().getInetSocketAddress().getPort()); - primarySCMHAManager.removeSCM(removeSCMRequest); - assertEquals(1, primarySCMHAManager.getRatisServer() - .getDivision().getGroup().getPeers().size()); - } finally { - scm2.getScmHAManager().getRatisServer().stop(); - } - } private StorageContainerManager getMockStorageContainerManager( OzoneConfiguration conf) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java index 743c7aea9da..2e3c8e84368 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java @@ -25,12 +25,15 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SEQUENCE_ID_BATCH_SIZE; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -38,9 +41,13 @@ * Tests for {@link SequenceIdGenerator}. 
*/ public class TestSequenceIDGenerator { + + @TempDir + private File testDir; + @Test public void testSequenceIDGenUponNonRatis() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(testDir); SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf); scmMetadataStore.start(conf); @@ -82,7 +89,7 @@ public void testSequenceIDGenUponNonRatis() throws Exception { @Test public void testSequenceIDGenUponRatis() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(testDir); // change batchSize to 100 conf.setInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE, 100); @@ -129,7 +136,7 @@ public void testSequenceIDGenUponRatis() throws Exception { public void testSequenceIDGenUponRatisWhenCurrentScmIsNotALeader() throws Exception { int batchSize = 100; - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(testDir); conf.setInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE, batchSize); SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf); scmMetadataStore.start(conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 8dd6914e644..b241ac0f2d2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -25,9 +25,7 @@ import java.util.Arrays; import java.util.List; import java.util.UUID; -import java.util.concurrent.TimeoutException; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -35,7 +33,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; @@ -67,19 +64,17 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; import org.apache.hadoop.test.PathUtils; import org.apache.commons.io.IOUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static java.util.Collections.emptyList; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto; import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -91,6 +86,7 @@ * Test for different container placement policy. 
*/ public class TestContainerPlacement { + @TempDir private File testDir; private DBStore dbStore; private ContainerManager containerManager; @@ -103,8 +99,6 @@ public class TestContainerPlacement { @BeforeEach public void setUp() throws Exception { conf = getConf(); - testDir = GenericTestUtils.getTestDir( - TestContainerPlacement.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); @@ -123,8 +117,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } /** @@ -182,7 +174,7 @@ ContainerManager createContainerManager() */ @Test public void testContainerPlacementCapacity() throws IOException, - InterruptedException, TimeoutException { + InterruptedException { final int nodeCount = 4; final long capacity = 10L * OzoneConsts.GB; final long used = 2L * OzoneConsts.GB; @@ -201,11 +193,6 @@ public void testContainerPlacementCapacity() throws IOException, List datanodes = HddsTestUtils .getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount); XceiverClientManager xceiverClientManager = null; - LayoutVersionManager versionManager = - scmNodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = - toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); try { for (DatanodeDetails datanodeDetails : datanodes) { UUID dnId = datanodeDetails.getUuid(); @@ -221,7 +208,7 @@ public void testContainerPlacementCapacity() throws IOException, Arrays.asList(report), emptyList()); datanodeInfo.updateStorageReports( nodeReportProto.getStorageReportList()); - scmNodeManager.processHeartbeat(datanodeDetails, layoutInfo); + scmNodeManager.processHeartbeat(datanodeDetails); } //TODO: wait for heartbeat to be processed @@ -265,7 +252,6 @@ public void testContainerPlacementCapacity() throws IOException, if (xceiverClientManager != null) { xceiverClientManager.close(); } - FileUtil.fullyDelete(testDir); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 523d4226cb4..06565e1b7e5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.io.IOException; import java.util.Collections; @@ -49,6 +48,7 @@ import java.util.Set; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; @@ -406,7 +406,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN replicas.add(unhealthy); nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); - Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + when(repManager.getContainerReplicaCount(eq(containerID))) .thenReturn(new RatisContainerReplicaCount(container, 
replicas, Collections.emptyList(), 2, false)); DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true); @@ -430,7 +430,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) .build(); replicas.add(copyOfUnhealthyOnNewNode); - Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + when(repManager.getContainerReplicaCount(eq(containerID))) .thenReturn(new RatisContainerReplicaCount(container, replicas, Collections.emptyList(), 2, false)); DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false); @@ -692,8 +692,8 @@ public void testStartTimeMetricWhenNodesDecommissioned() assertEquals(1, monitor.getTrackedNodeCount()); long monitoredTime = monitor.getSingleTrackedNode(dn1.getIpAddress()) .getStartTime(); - assertTrue(monitoredTime >= beforeTime); - assertTrue(monitoredTime <= afterTime); + assertThat(monitoredTime).isGreaterThanOrEqualTo(beforeTime); + assertThat(monitoredTime).isLessThanOrEqualTo(afterTime); } @Test @@ -837,6 +837,50 @@ public void testCancelledNodesMovedToInService() nodeManager.getNodeStatus(dn1).getOperationalState()); } + @Test + public void testContainersReplicatedOnDecomDnAPI() + throws NodeNotFoundException, ContainerNotFoundException { + conf.setBoolean("hdds.scm.replication.enable.legacy", false); + + DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); + nodeManager.register(dn1, + new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY)); + + Set<ContainerID> containers = new HashSet<>(); + containers.add(ContainerID.valueOf(1)); + containers.add(ContainerID.valueOf(2)); + nodeManager.setContainers(dn1, containers); + DatanodeAdminMonitorTestUtil + .mockGetContainerReplicaCount(repManager, + true, + HddsProtos.LifeCycleState.CLOSED, + DECOMMISSIONING, + IN_SERVICE, + IN_SERVICE); + + monitor.startMonitoring(dn1); + monitor.run(); + assertEquals(1, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 2); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 0); + + DatanodeAdminMonitorTestUtil + .mockGetContainerReplicaCount(repManager, + true, + HddsProtos.LifeCycleState.OPEN, + IN_SERVICE); + + monitor.run(); + assertEquals(1, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 0); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 2); + } + /** * Generate a set of ContainerID, starting from an ID of zero up to the given * count minus 1.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 4724d94ae26..aa09022b14d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -33,11 +33,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -73,11 +71,11 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * Test DeadNodeHandler. @@ -92,7 +90,8 @@ public class TestDeadNodeHandler { private HealthyReadOnlyNodeHandler healthyReadOnlyNodeHandler; private EventPublisher publisher; private EventQueue eventQueue; - private String storageDir; + @TempDir + private File storageDir; private SCMContext scmContext; private DeletedBlockLog deletedBlockLog; @@ -104,9 +103,7 @@ public void setup() throws IOException, AuthenticationException { conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 10, StorageUnit.MB); - storageDir = GenericTestUtils.getTempPath( - TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir.getPath()); eventQueue = new EventQueue(); scm = HddsTestUtils.getScm(conf); nodeManager = (SCMNodeManager) scm.getScmNodeManager(); @@ -136,20 +133,19 @@ public void setup() throws IOException, AuthenticationException { public void teardown() { scm.stop(); scm.join(); - FileUtil.fullyDelete(new File(storageDir)); } @Test @SuppressWarnings("checkstyle:MethodLength") - public void testOnMessage() throws Exception { + public void testOnMessage(@TempDir File tempDir) throws Exception { //GIVEN DatanodeDetails datanode1 = MockDatanodeDetails.randomDatanodeDetails(); DatanodeDetails datanode2 = MockDatanodeDetails.randomDatanodeDetails(); DatanodeDetails datanode3 = MockDatanodeDetails.randomDatanodeDetails(); - String storagePath = GenericTestUtils.getRandomizedTempPath() + String storagePath = tempDir.getPath() .concat("/data-" + datanode1.getUuidString()); - String metaStoragePath = GenericTestUtils.getRandomizedTempPath() + String metaStoragePath = tempDir.getPath() .concat("/metadata-" + datanode1.getUuidString()); StorageReportProto storageOne = HddsTestUtils.createStorageReport( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 332d762a4cd..09f0dd59b9f 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -41,7 +41,6 @@ import java.util.ArrayList; import static java.util.Collections.singletonList; -import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -165,7 +164,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Attempt to decommission on dn(9) which has another instance at // dn(11) with identical ports. - nodeManager.processHeartbeat(dns.get(9), defaultLayoutVersionProto()); + nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress())); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -237,7 +236,7 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // Now decommission one of the DNs with the duplicate port DatanodeDetails expectedDN = dns.get(9); - nodeManager.processHeartbeat(expectedDN, defaultLayoutVersionProto()); + nodeManager.processHeartbeat(expectedDN); decom.decommissionNodes(singletonList( expectedDN.getIpAddress() + ":" + ratisPort)); @@ -287,7 +286,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() // Attempt to enable maintenance on dn(9) which has another instance at // dn(11) with identical ports. - nodeManager.processHeartbeat(dns.get(9), defaultLayoutVersionProto()); + nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); decom.startMaintenanceNodes(singletonList(duplicatePorts.getIpAddress()), 100); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java index 2005d518efb..d9cd79b7522 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java @@ -35,7 +35,7 @@ import java.util.Set; import static org.mockito.Mockito.mock; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; @@ -346,7 +346,7 @@ public void testDecommMonitorStartTimeForHost() { monitor.run(); long startTime = monitor.getSingleTrackedNode(dn1.getIpAddress()) .getStartTime(); - assertTrue(before <= startTime); - assertTrue(after >= startTime); + assertThat(before).isLessThanOrEqualTo(startTime); + assertThat(after).isGreaterThanOrEqualTo(startTime); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index ecd5cbed5f8..558fc420f48 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -22,10 +22,10 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.List; -import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -43,9 +43,9 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,10 +59,10 @@ public class TestNodeReportHandler implements EventPublisher { private NodeReportHandler nodeReportHandler; private HDDSLayoutVersionManager versionManager; private SCMNodeManager nodeManager; - private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/data-" + UUID.randomUUID().toString()); - private String metaStoragePath = GenericTestUtils.getRandomizedTempPath() - .concat("/metadata-" + UUID.randomUUID().toString()); + @TempDir + private File storagePath; + @TempDir + private File metaStoragePath; @BeforeEach public void resetEventCollector() throws IOException { @@ -84,9 +84,9 @@ public void resetEventCollector() throws IOException { public void testNodeReport() throws IOException { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); StorageReportProto storageOne = HddsTestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); + .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, null); MetadataStorageReportProto metaStorageOne = HddsTestUtils - .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null); + .createMetadataStorageReport(metaStoragePath.getPath(), 100, 10, 90, null); SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); assertNull(nodeMetric); @@ -100,7 +100,7 @@ public void testNodeReport() throws IOException { assertEquals(10, (long) nodeMetric.get().getScmUsed().get()); StorageReportProto storageTwo = HddsTestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); + .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, null); nodeReportHandler.onMessage( getNodeReport(dn, Arrays.asList(storageOne, storageTwo), Arrays.asList(metaStorageOne)), this); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 85a70b64673..cc9133cf684 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.Set; import java.util.UUID; -import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -73,7 +72,6 @@ import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; import 
org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.Time; @@ -86,6 +84,7 @@ import java.util.Map; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -123,6 +122,8 @@ import static org.mockito.Mockito.eq; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; import org.slf4j.Logger; @@ -219,17 +220,12 @@ public void testScmHeartbeat() throws IOException, InterruptedException, AuthenticationException { try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); int registeredNodes = 5; // Send some heartbeats from different nodes. for (int x = 0; x < registeredNodes; x++) { DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + nodeManager.processHeartbeat(datanodeDetails); } //TODO: wait for heartbeat to be processed @@ -376,7 +372,7 @@ private void assertPipelineClosedAfterLayoutHeartbeat( allNodes); // node sends incorrect layout. - nodeManager.processHeartbeat(node, layout); + nodeManager.processLayoutVersionReport(node, layout); // Its pipelines should be closed then removed, meaning there is not // enough nodes for factor 3 pipelines. @@ -444,8 +440,10 @@ public void testScmLayoutOnRegister() assertPipelineCreationFailsWithNotEnoughNodes(1); // Heartbeat bad MLV nodes back to healthy. - nodeManager.processHeartbeat(badMlvNode1, CORRECT_LAYOUT_PROTO); - nodeManager.processHeartbeat(badMlvNode2, CORRECT_LAYOUT_PROTO); + nodeManager.processLayoutVersionReport(badMlvNode1, CORRECT_LAYOUT_PROTO); + nodeManager.processLayoutVersionReport(badMlvNode2, CORRECT_LAYOUT_PROTO); + nodeManager.processHeartbeat(badMlvNode1); + nodeManager.processHeartbeat(badMlvNode2); // After moving out of healthy readonly, pipeline creation should be // triggered. @@ -460,17 +458,15 @@ public void testScmLayoutOnRegister() private void assertPipelineCreationFailsWithNotEnoughNodes( int actualNodeCount) throws Exception { - try { + SCMException ex = assertThrows(SCMException.class, () -> { ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor( HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); scm.getPipelineManager().createPipeline(ratisThree); - fail("3 nodes should not have been found for a pipeline."); - } catch (SCMException ex) { - assertThat(ex.getMessage()).contains("Required 3. Found " + - actualNodeCount); - } + }, "3 nodes should not have been found for a pipeline."); + assertThat(ex.getMessage()).contains("Required 3. 
Found " + + actualNodeCount); } private void assertPipelines(HddsProtos.ReplicationFactor factor, @@ -558,14 +554,8 @@ public void testScmShutdown() SCMNodeManager nodeManager = createNodeManager(conf); DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); - LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - nodeManager.close(); - // These should never be processed. - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + nodeManager.processHeartbeat(datanodeDetails); // Let us just wait for 2 seconds to prove that HBs are not processed. Thread.sleep(2 * 1000); @@ -588,16 +578,10 @@ public void testScmHealthyNodeCount() final int count = 10; try (SCMNodeManager nodeManager = createNodeManager(conf)) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - for (int x = 0; x < count; x++) { DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + nodeManager.processHeartbeat(datanodeDetails); } //TODO: wait for heartbeat to be processed Thread.sleep(4 * 1000); @@ -657,12 +641,6 @@ public void testSetNodeOpStateAndCommandFired() DatanodeDetails dn = HddsTestUtils.createRandomDatanodeAndRegister( nodeManager); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - final LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - long expiry = System.currentTimeMillis() / 1000 + 1000; nodeManager.setNodeOperationalState(dn, HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, expiry); @@ -670,7 +648,7 @@ public void testSetNodeOpStateAndCommandFired() // If found mismatch, leader SCM fires a SetNodeOperationalStateCommand // to update the opState persisted in Datanode. scm.getScmContext().updateLeaderAndTerm(true, 1); - List commands = nodeManager.processHeartbeat(dn, layoutInfo); + List commands = nodeManager.processHeartbeat(dn); assertEquals(SetNodeOperationalStateCommand.class, commands.get(0).getClass()); @@ -679,7 +657,7 @@ public void testSetNodeOpStateAndCommandFired() // If found mismatch, follower SCM update its own opState according // to the heartbeat, and no SCMCommand will be fired. scm.getScmContext().updateLeaderAndTerm(false, 2); - commands = nodeManager.processHeartbeat(dn, layoutInfo); + commands = nodeManager.processHeartbeat(dn); assertEquals(0, commands.size()); @@ -713,11 +691,6 @@ public void testScmDetectStaleAndDeadNode() try (SCMNodeManager nodeManager = createNodeManager(conf)) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); List nodeList = createNodeSet(nodeManager, nodeCount); @@ -725,18 +698,18 @@ public void testScmDetectStaleAndDeadNode() nodeManager); // Heartbeat once - nodeManager.processHeartbeat(staleNode, layoutInfo); + nodeManager.processHeartbeat(staleNode); // Heartbeat all other nodes. 
for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); } // Wait for 2 seconds .. and heartbeat good nodes again. Thread.sleep(2 * 1000); for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); } // Wait for 2 seconds, wait a total of 4 seconds to make sure that the @@ -759,7 +732,7 @@ public void testScmDetectStaleAndDeadNode() Thread.sleep(1000); // heartbeat good nodes again. for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); } // 6 seconds is the dead window for this test , so we wait a total of @@ -799,8 +772,7 @@ public void testScmDetectStaleAndDeadNode() * @throws AuthenticationException */ @Test - public void testScmHandleJvmPause() - throws IOException, InterruptedException, AuthenticationException { + void testScmHandleJvmPause() throws Exception { final int healthCheckInterval = 200; // milliseconds final int heartbeatInterval = 1; // seconds final int staleNodeInterval = 3; // seconds @@ -818,18 +790,13 @@ public void testScmHandleJvmPause() deadNodeInterval, SECONDS); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); DatanodeDetails node1 = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails node2 = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(node1, layoutInfo); - nodeManager.processHeartbeat(node2, layoutInfo); + nodeManager.processHeartbeat(node1); + nodeManager.processHeartbeat(node2); // Sleep so that heartbeat processing thread gets to run. 
Thread.sleep(1000); @@ -861,21 +828,18 @@ public void testScmHandleJvmPause() schedFuture = nodeManager.unpauseHealthCheck(); // Step 3 : wait for 1 iteration of health check - try { - schedFuture.get(); - assertThat(nodeManager.getSkippedHealthChecks()) - .withFailMessage("We did not skip any heartbeat checks") - .isGreaterThan(0); - } catch (ExecutionException e) { - fail("Unexpected exception waiting for Scheduled Health Check"); - } + + schedFuture.get(); + assertThat(nodeManager.getSkippedHealthChecks()) + .withFailMessage("We did not skip any heartbeat checks") + .isGreaterThan(0); // Step 4 : all nodes should still be HEALTHY assertEquals(2, nodeManager.getAllNodes().size()); assertEquals(2, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); // Step 5 : heartbeat for node1 - nodeManager.processHeartbeat(node1, layoutInfo); + nodeManager.processHeartbeat(node1); // Step 6 : wait for health check process to run Thread.sleep(1000); @@ -998,8 +962,6 @@ public void testProcessCommandQueueReport() SCMNodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), scmContext, lvm); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - lvm.getMetadataLayoutVersion(), lvm.getSoftwareLayoutVersion()); DatanodeDetails node1 = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); @@ -1019,7 +981,7 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), assertEquals(5, nodeManager.getTotalDatanodeCommandCount( node1, SCMCommandProto.Type.deleteBlocksCommand)); - nodeManager.processHeartbeat(node1, layoutInfo, + nodeManager.processHeartbeat(node1, CommandQueueReportProto.newBuilder() .addCommand(SCMCommandProto.Type.replicateContainerCommand) .addCount(123) @@ -1049,7 +1011,7 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), // Send another report missing an earlier entry, and ensure it is not // still reported as a stale value. - nodeManager.processHeartbeat(node1, layoutInfo, + nodeManager.processHeartbeat(node1, CommandQueueReportProto.newBuilder() .addCommand(SCMCommandProto.Type.closeContainerCommand) .addCount(11) @@ -1126,7 +1088,7 @@ public void testCommandCount() public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, AuthenticationException { try (SCMNodeManager nodeManager = createNodeManager(getConf())) { NullPointerException npe = assertThrows(NullPointerException.class, - () -> nodeManager.processHeartbeat(null, null)); + () -> nodeManager.processHeartbeat(null)); assertThat(npe).hasMessage("Heartbeat is missing DatanodeDetails."); } } @@ -1195,20 +1157,15 @@ public void testScmClusterIsInExpectedState1() * Cluster state: Healthy: All nodes are heartbeat-ing like normal. 
*/ try (SCMNodeManager nodeManager = createNodeManager(conf)) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); DatanodeDetails healthyNode = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails staleNode = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails deadNode = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(healthyNode, layoutInfo); - nodeManager.processHeartbeat(staleNode, layoutInfo); - nodeManager.processHeartbeat(deadNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); + nodeManager.processHeartbeat(staleNode); + nodeManager.processHeartbeat(deadNode); // Sleep so that heartbeat processing thread gets to run. Thread.sleep(500); @@ -1234,12 +1191,12 @@ public void testScmClusterIsInExpectedState1() * the 3 second windows. */ - nodeManager.processHeartbeat(healthyNode, layoutInfo); - nodeManager.processHeartbeat(staleNode, layoutInfo); - nodeManager.processHeartbeat(deadNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); + nodeManager.processHeartbeat(staleNode); + nodeManager.processHeartbeat(deadNode); Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); Thread.sleep(2 * 1000); assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy())); @@ -1260,10 +1217,10 @@ public void testScmClusterIsInExpectedState1() * staleNode to move to stale state and deadNode to move to dead state. */ - nodeManager.processHeartbeat(healthyNode, layoutInfo); - nodeManager.processHeartbeat(staleNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); + nodeManager.processHeartbeat(staleNode); Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); Thread.sleep(2 * 1000); // 3.5 seconds have elapsed for stale node, so it moves into Stale. @@ -1295,9 +1252,9 @@ public void testScmClusterIsInExpectedState1() * Cluster State : let us heartbeat all the nodes and verify that we get * back all the nodes in healthy state. */ - nodeManager.processHeartbeat(healthyNode, layoutInfo); - nodeManager.processHeartbeat(staleNode, layoutInfo); - nodeManager.processHeartbeat(deadNode, layoutInfo); + nodeManager.processHeartbeat(healthyNode); + nodeManager.processHeartbeat(staleNode); + nodeManager.processHeartbeat(deadNode); Thread.sleep(500); //Assert all nodes are healthy. 
assertEquals(3, nodeManager.getAllNodes().size()); @@ -1316,13 +1273,9 @@ public void testScmClusterIsInExpectedState1() private void heartbeatNodeSet(SCMNodeManager manager, List<DatanodeDetails> list, int sleepDuration) throws InterruptedException { - LayoutVersionManager versionManager = manager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); while (!Thread.currentThread().isInterrupted()) { for (DatanodeDetails dn : list) { - manager.processHeartbeat(dn, layoutInfo); + manager.processHeartbeat(dn); } Thread.sleep(sleepDuration); } @@ -1405,16 +1358,10 @@ public void testScmClusterIsInExpectedState2() } }; - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - // No Thread just one time HBs the node manager, so that these will be // marked as dead nodes eventually. for (DatanodeDetails dn : deadNodeList) { - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); } @@ -1541,11 +1488,6 @@ public void testScmStatsFromNodeReport() final long remaining = capacity - used; List<DatanodeDetails> dnList = new ArrayList<>(nodeCount); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); EventQueue eventQueue = (EventQueue) scm.getEventQueue(); for (int x = 0; x < nodeCount; x++) { @@ -1558,7 +1500,7 @@ public void testScmStatsFromNodeReport() .createStorageReport(dnId, storagePath, capacity, used, free, null); nodeManager.register(dn, HddsTestUtils.createNodeReport( Arrays.asList(report), emptyList()), null); - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); } //TODO: wait for EventQueue to be processed eventQueue.processAll(8000L); @@ -1572,6 +1514,49 @@ public void testScmStatsFromNodeReport() } } + private List<StorageReportProto> generateStorageReportProto( + int volumeCount, UUID dnId, long capacity, long used, long remaining) { + List<StorageReportProto> reports = new ArrayList<>(volumeCount); + boolean failed = true; + for (int x = 0; x < volumeCount; x++) { + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + reports.add(HddsTestUtils + .createStorageReport(dnId, storagePath, capacity, + used, remaining, null, failed)); + failed = !failed; + } + return reports; + } + + private static Stream<Arguments> calculateStoragePercentageScenarios() { + return Stream.of( + Arguments.of(600, 65, 500, 1, "600.0B", "10.83", "5.83"), + Arguments.of(10000, 1000, 8800, 12, "117.2KB", "10.00", "2.00"), + Arguments.of(100000000, 1000, 899999, 12, "1.1GB", "0.00", "99.10"), + Arguments.of(10000, 1000, 0, 0, "0.0B", "N/A", "N/A"), + Arguments.of(0, 0, 0, 0, "0.0B", "N/A", "N/A"), + Arguments.of(1010, 547, 400, 5, "4.9KB", "54.16", "6.24") + ); + } + + @ParameterizedTest + @MethodSource("calculateStoragePercentageScenarios") + public void testCalculateStoragePercentage(long perCapacity, + long used, long remaining, int volumeCount, String totalCapacity, + String scmUsedPerc, String nonScmUsedPerc) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + UUID dnId = dn.getUuid(); + List<StorageReportProto> reports = volumeCount > 0 ?
+ generateStorageReportProto(volumeCount, dnId, perCapacity, + used, remaining) : null; + String capacityResult = SCMNodeManager.calculateStorageCapacity(reports); + assertEquals(totalCapacity, capacityResult); + String[] storagePercentage = SCMNodeManager.calculateStoragePercentage( + reports); + assertEquals(scmUsedPerc, storagePercentage[0]); + assertEquals(nonScmUsedPerc, storagePercentage[1]); + } + /** * Test multiple nodes sending initial heartbeat with their node report * with multiple volumes. @@ -1607,12 +1592,7 @@ public void tesVolumeInfoFromNodeReport() } nodeManager.register(dn, HddsTestUtils.createNodeReport(reports, emptyList()), null); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - nodeManager.processHeartbeat(dn, layoutInfo); + nodeManager.processHeartbeat(dn); //TODO: wait for EventQueue to be processed eventQueue.processAll(8000L); @@ -1665,12 +1645,7 @@ public void testScmNodeReportUpdate() nodeReportHandler.onMessage( new NodeReportFromDatanode(datanodeDetails, nodeReportProto), publisher); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + nodeManager.processHeartbeat(datanodeDetails); Thread.sleep(100); } @@ -1745,13 +1720,7 @@ public void testScmNodeReportUpdate() foundRemaining = nodeManager.getStats().getRemaining().get(); assertEquals(0, foundRemaining); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); - - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + nodeManager.processHeartbeat(datanodeDetails); // Wait up to 5 seconds so that the dead node becomes healthy // Verify usage info should be updated. 
@@ -1800,14 +1769,9 @@ public void testHandlingSCMCommandEvent() new CloseContainerCommand(1L, PipelineID.randomId()))); - LayoutVersionManager versionManager = - nodemanager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); eq.processAll(1000L); List command = - nodemanager.processHeartbeat(datanodeDetails, layoutInfo); + nodemanager.processHeartbeat(datanodeDetails); // With dh registered, SCM will send create pipeline command to dn assertThat(command.size()).isGreaterThanOrEqualTo(1); assertTrue(command.get(0).getClass().equals( @@ -1937,16 +1901,11 @@ public void testGetNodeInfo() Arrays.asList(report), emptyList()), HddsTestUtils.getRandomPipelineReports()); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = toLayoutVersionProto( - versionManager.getMetadataLayoutVersion(), - versionManager.getSoftwareLayoutVersion()); nodeManager.register(datanodeDetails, HddsTestUtils.createNodeReport(Arrays.asList(report), emptyList()), - HddsTestUtils.getRandomPipelineReports(), layoutInfo); - nodeManager.processHeartbeat(datanodeDetails, layoutInfo); + HddsTestUtils.getRandomPipelineReports()); + nodeManager.processHeartbeat(datanodeDetails); if (i == 5) { nodeManager.setNodeOperationalState(datanodeDetails, HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java similarity index 87% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java index 79ca6013165..20c6aa2de37 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import java.io.File; import java.io.IOException; @@ -26,21 +26,16 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -100,16 +95,8 @@ public static void teardown() throws IOException { @Test public void testHBProcessing() throws InterruptedException { long hbProcessed = getCounter("NumHBProcessed"); - createNodeReport(); - - LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder() - .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion()) - .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion()) - .build(); - nodeManager.processHeartbeat(registeredDatanode, layoutInfo); - + nodeManager.processHeartbeat(registeredDatanode); assertEquals(hbProcessed + 1, getCounter("NumHBProcessed"), "NumHBProcessed"); } @@ -119,17 +106,8 @@ public void testHBProcessing() throws InterruptedException { */ @Test public void testHBProcessingFailure() { - long hbProcessedFailed = getCounter("NumHBProcessingFailed"); - - LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder() - .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion()) - .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion()) - .build(); - nodeManager.processHeartbeat(MockDatanodeDetails - .randomDatanodeDetails(), layoutInfo); - + nodeManager.processHeartbeat(MockDatanodeDetails.randomDatanodeDetails()); assertEquals(hbProcessedFailed + 1, getCounter("NumHBProcessingFailed"), "NumHBProcessingFailed"); } @@ -254,13 +232,7 @@ public void testNodeCountAndInfoMetricsReported() throws Exception { getMetrics(SCMNodeMetrics.class.getSimpleName())); assertGauge("TotalUsed", 10L, getMetrics(SCMNodeMetrics.class.getSimpleName())); - - LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); - LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder() - .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion()) - .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion()) - .build(); - nodeManager.processHeartbeat(registeredDatanode, layoutInfo); + nodeManager.processHeartbeat(registeredDatanode); 
sleep(4000); metricsSource = getMetrics(SCMNodeMetrics.SOURCE_NAME); assertGauge("InServiceHealthyReadonlyNodes", 0, metricsSource); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java index 6dd5f674d4d..147aa719841 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java @@ -28,10 +28,11 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -53,24 +54,25 @@ */ public class TestSCMNodeStorageStatMap { private static final int DATANODE_COUNT = 100; - private final long capacity = 10L * OzoneConsts.GB; - private final long used = 2L * OzoneConsts.GB; - private final long remaining = capacity - used; + private static final long CAPACITY = 10L * OzoneConsts.GB; + private static final long USED = 2L * OzoneConsts.GB; + private static final long REMAINING = CAPACITY - USED; private static OzoneConfiguration conf = new OzoneConfiguration(); private final Map<UUID, Set<StorageLocationReport>> testData = new ConcurrentHashMap<>(); + @TempDir + private File tempFile; private void generateData() { for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { UUID dnId = UUID.randomUUID(); Set<StorageLocationReport> reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + dnIndex); + String path = tempFile.getPath() + "-" + dnIndex; StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); builder.setStorageType(StorageType.DISK).setId(dnId.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); + .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING) + .setCapacity(CAPACITY).setFailed(false); reportSet.add(builder.build()); testData.put(UUID.randomUUID(), reportSet); } @@ -114,13 +116,12 @@ public void testInsertNewDatanode() throws SCMException { public void testUpdateUnknownDatanode() { SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); UUID unknownNode = UUID.randomUUID(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode); + String path = tempFile.getPath() + "-" + unknownNode; Set<StorageLocationReport> reportSet = new HashSet<>(); StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); builder.setStorageType(StorageType.DISK).setId(unknownNode.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); + .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING) + .setCapacity(CAPACITY).setFailed(false); reportSet.add(builder.build()); Throwable t = assertThrows(SCMException.class, () -> map.updateDatanodeMap(unknownNode, reportSet)); @@ -136,8 +137,7 @@ public void testProcessNodeReportCheckOneNode() throws IOException { map.insertNewDatanode(key, reportSet); assertTrue(map.isKnownDatanode(key)); UUID storageId =
UUID.randomUUID(); - String path = - GenericTestUtils.getRandomizedTempPath().concat("/" + storageId); + String path = tempFile.getPath().concat("/" + storageId); StorageLocationReport report = reportSet.iterator().next(); long reportCapacity = report.getCapacity(); long reportScmUsed = report.getScmUsed(); @@ -184,22 +184,20 @@ public void testProcessMultipleNodeReports() throws SCMException { .entrySet()) { map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); } - assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity()); - assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace()); - assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed()); + assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity()); + assertEquals(DATANODE_COUNT * REMAINING, map.getTotalFreeSpace()); + assertEquals(DATANODE_COUNT * USED, map.getTotalSpaceUsed()); // update 1/4th of the datanode to be full for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData .entrySet()) { Set<StorageLocationReport> reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry - .getKey().toString()); + String path = tempFile.getPath() + "-" + keyEntry.getKey().toString(); StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); builder.setStorageType(StorageType.DISK) .setId(keyEntry.getKey().toString()).setStorageLocation(path) - .setScmUsed(capacity).setRemaining(0).setCapacity(capacity) + .setScmUsed(CAPACITY).setRemaining(0).setCapacity(CAPACITY) .setFailed(false); reportSet.add(builder.build()); @@ -216,9 +214,9 @@ public void testProcessMultipleNodeReports() throws SCMException { assertEquals(0.75 * DATANODE_COUNT, map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 0); - assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0); - assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 0); - assertEquals(0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity), + assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0); + assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 0); + assertEquals(0.75 * DATANODE_COUNT * USED + (0.25 * DATANODE_COUNT * CAPACITY), map.getTotalSpaceUsed(), 0); counter = 1; // Remove 1/4 of the DataNodes from the Map @@ -236,9 +234,9 @@ public void testProcessMultipleNodeReports() throws SCMException { assertEquals(0.75 * DATANODE_COUNT, map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 0); - assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0); - assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 0); - assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0); + assertEquals(0.75 * DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0); + assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 0); + assertEquals(0.75 * DATANODE_COUNT * USED, map.getTotalSpaceUsed(), 0); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java index 8aff3dd28aa..0ef28f658d4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java @@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import
org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.protocol.proto @@ -40,7 +38,6 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -122,19 +119,13 @@ public void testStatisticsUpdate() throws Exception { //TODO: Support logic to mark a node as dead in NodeManager. - LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); - StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo = - StorageContainerDatanodeProtocolProtos.LayoutVersionProto.newBuilder() - .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion()) - .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion()) - .build(); - nodeManager.processHeartbeat(datanode2, layoutInfo); + nodeManager.processHeartbeat(datanode2); Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2, layoutInfo); + nodeManager.processHeartbeat(datanode2); Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2, layoutInfo); + nodeManager.processHeartbeat(datanode2); Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2, layoutInfo); + nodeManager.processHeartbeat(datanode2); //THEN statistics in SCM should changed. stat = nodeManager.getStats(); assertEquals(200L, stat.getCapacity().get()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java index 2eba81d505a..f2ed769496b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java @@ -52,7 +52,7 @@ import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.ALLOCATED; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.anyInt; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java index 61ba3d3bb8d..385e1c65316 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -35,9 +33,9 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; import org.slf4j.Logger; @@ -46,7 +44,6 @@ import java.io.File; import java.io.IOException; import java.util.List; -import java.util.UUID; import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; @@ -61,17 +58,14 @@ public class TestPipelineDatanodesIntersection { private OzoneConfiguration conf; private boolean end; + @TempDir private File testDir; private DBStore dbStore; @BeforeEach public void initialize() throws IOException { - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); end = false; - testDir = GenericTestUtils.getTestDir( - TestPipelineDatanodesIntersection.class.getSimpleName() - + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); } @@ -81,8 +75,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } @ParameterizedTest diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 270ae0ef493..e9407d6a941 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -100,11 +99,11 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.doAnswer; @@ -128,11 +127,11 @@ public class TestPipelineManagerImpl { private TestClock testClock; @BeforeEach - void init(@TempDir File testDir) throws Exception { + void init(@TempDir File testDir, @TempDir File dbDir) throws Exception { testClock = new TestClock(Instant.now(), ZoneOffset.UTC); - conf = SCMTestUtils.getConf(); - scm = HddsTestUtils.getScm(conf); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + conf = 
SCMTestUtils.getConf(dbDir); + scm = HddsTestUtils.getScm(SCMTestUtils.getConf(testDir)); + // Mock Node Manager is not able to correctly set up things for the EC // placement policy (Rack Scatter), so just use the random one. conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY, @@ -216,7 +215,7 @@ public void testCreatePipeline() throws Exception { PipelineManagerImpl pipelineManager2 = createPipelineManager(true, buffer2); // Should be able to load previous pipelines. - assertFalse(pipelineManager2.getPipelines().isEmpty()); + assertThat(pipelineManager2.getPipelines()).isNotEmpty(); assertEquals(3, pipelineManager.getPipelines().size()); Pipeline pipeline3 = pipelineManager2.createPipeline( RatisReplicationConfig.getInstance(ReplicationFactor.THREE)); @@ -261,10 +260,10 @@ public void testUpdatePipelineStates() throws Exception { assertEquals(Pipeline.PipelineState.DORMANT, pipelineManager.getPipeline(pipelineID).getPipelineState()); buffer.flush(); assertEquals(Pipeline.PipelineState.DORMANT, pipelineStore.get(pipeline.getId()).getPipelineState()); - assertFalse(pipelineManager + assertThat(pipelineManager .getPipelines(RatisReplicationConfig .getInstance(ReplicationFactor.THREE), - Pipeline.PipelineState.OPEN).contains(pipeline)); + Pipeline.PipelineState.OPEN)).doesNotContain(pipeline); assertEquals(1, pipelineManager.getPipelineCount( RatisReplicationConfig.getInstance(ReplicationFactor.THREE), Pipeline.PipelineState.DORMANT)); @@ -332,28 +331,16 @@ public void testRemovePipeline() throws Exception { .getPipelines(RatisReplicationConfig .getInstance(ReplicationFactor.THREE), Pipeline.PipelineState.OPEN).contains(pipeline)); - - try { - pipelineManager.removePipeline(pipeline); - fail(); - } catch (IOException ioe) { - // Should not be able to remove the OPEN pipeline. - assertEquals(1, pipelineManager.getPipelines().size()); - } catch (Exception e) { - fail("Should not reach here."); - } + assertThrows(IOException.class, () -> pipelineManager.removePipeline(pipeline)); + // Should not be able to remove the OPEN pipeline. + assertEquals(1, pipelineManager.getPipelines().size()); // Destroy pipeline pipelineManager.closePipeline(pipeline.getId()); pipelineManager.deletePipeline(pipeline.getId()); - try { - pipelineManager.getPipeline(pipeline.getId()); - fail("Pipeline should not have been retrieved"); - } catch (PipelineNotFoundException e) { - // There may be pipelines created by BackgroundPipelineCreator - // exist in pipelineManager, just ignore them. - } + assertThrows(PipelineNotFoundException.class, () -> pipelineManager.getPipeline(pipeline.getId()), + "Pipeline should not have been retrieved"); } } @@ -443,17 +430,11 @@ public void testPipelineCreationFailedMetric() throws Exception { assertEquals(0, numPipelineCreateFailed); //This should fail... - try { - pipelineManager - .createPipeline(RatisReplicationConfig - .getInstance(ReplicationFactor.THREE)); - fail(); - } catch (SCMException ioe) { - // pipeline creation failed this time. - assertEquals( - ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, - ioe.getResult()); - } + SCMException e = + assertThrows(SCMException.class, + () -> pipelineManager.createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))); + // pipeline creation failed this time. 
+ assertEquals(ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, e.getResult()); metrics = getMetrics( SCMPipelineMetrics.class.getSimpleName()); @@ -573,16 +554,16 @@ public void testScrubPipelines() throws Exception { pipelineManager.scrubPipelines(); // The allocatedPipeline should now be scrubbed as the interval has passed - assertFalse(pipelineManager + assertThat(pipelineManager .getPipelines(RatisReplicationConfig .getInstance(ReplicationFactor.THREE), - Pipeline.PipelineState.ALLOCATED).contains(allocatedPipeline)); + Pipeline.PipelineState.ALLOCATED)).doesNotContain(allocatedPipeline); // The closedPipeline should now be scrubbed as the interval has passed - assertFalse(pipelineManager + assertThat(pipelineManager .getPipelines(RatisReplicationConfig .getInstance(ReplicationFactor.THREE), - Pipeline.PipelineState.CLOSED).contains(closedPipeline)); + Pipeline.PipelineState.CLOSED)).doesNotContain(closedPipeline); pipelineManager.close(); } @@ -636,15 +617,11 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception { new SCMSafeModeManager.SafeModeStatus(true, false)); PipelineManagerImpl pipelineManager = createPipelineManager(true); - try { - pipelineManager - .createPipeline(RatisReplicationConfig - .getInstance(ReplicationFactor.THREE)); - fail("Pipelines should not have been created"); - } catch (IOException e) { - // No pipeline is created. - assertTrue(pipelineManager.getPipelines().isEmpty()); - } + assertThrows(IOException.class, + () -> pipelineManager.createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE)), + "Pipelines should not have been created"); + // No pipeline is created. + assertTrue(pipelineManager.getPipelines().isEmpty()); // Ensure a pipeline of factor ONE can be created - no exceptions should be // raised. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 2f0b0a5cc76..0f9ec84f033 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -27,8 +27,6 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -58,10 +56,10 @@ import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -83,7 +81,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotSame; -import static org.junit.jupiter.api.Assertions.fail; /** * Test for PipelinePlacementPolicy. 
@@ -96,6 +93,7 @@ public class TestPipelinePlacementPolicy { private NetworkTopologyImpl cluster; private static final int PIPELINE_PLACEMENT_MAX_NODES_COUNT = 10; private static final int PIPELINE_LOAD_LIMIT = 5; + @TempDir private File testDir; private DBStore dbStore; private SCMHAManager scmhaManager; @@ -109,14 +107,11 @@ public void init() throws Exception { // start with nodes with rack awareness. nodeManager = new MockNodeManager(cluster, getNodesWithRackAwareness(), false, PIPELINE_PLACEMENT_MAX_NODES_COUNT); - conf = SCMTestUtils.getConf(); + conf = SCMTestUtils.getConf(testDir); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, PIPELINE_LOAD_LIMIT); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 10, StorageUnit.MB); nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT); - testDir = GenericTestUtils.getTestDir( - TestPipelinePlacementPolicy.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); @@ -135,8 +130,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } private NetworkTopologyImpl initTopology() { @@ -251,25 +244,19 @@ public void testChooseNodeNotEnoughSpace() throws IOException { String expectedMessageSubstring = "Unable to find enough nodes that meet " + "the space requirement"; - try { - // A huge container size - localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), - new ArrayList<>(datanodes.size()), nodesRequired, - 0, 10 * OzoneConsts.TB); - fail("SCMException should have been thrown."); - } catch (SCMException ex) { - assertThat(ex.getMessage()).contains(expectedMessageSubstring); - } - try { - // a huge free space min configured - localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), - new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, - 0); - fail("SCMException should have been thrown."); - } catch (SCMException ex) { - assertThat(ex.getMessage()).contains(expectedMessageSubstring); - } + // A huge container size + SCMException ex = + assertThrows(SCMException.class, + () -> localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), + new ArrayList<>(datanodes.size()), nodesRequired, 0, 10 * OzoneConsts.TB)); + assertThat(ex.getMessage()).contains(expectedMessageSubstring); + + // a huge free space min configured + ex = assertThrows(SCMException.class, + () -> localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), + new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, 0)); + assertThat(ex.getMessage()).contains(expectedMessageSubstring); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java index 3874a88941d..9feb9e1f0a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; 
import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,10 +37,10 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -50,13 +48,11 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeoutException; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.assertj.core.api.Assertions.assertThat; /** @@ -65,15 +61,13 @@ public class TestPipelineStateManagerImpl { private PipelineStateManager stateManager; + @TempDir private File testDir; private DBStore dbStore; @BeforeEach public void init() throws Exception { - final OzoneConfiguration conf = SCMTestUtils.getConf(); - testDir = GenericTestUtils.getTestDir( - TestPipelineStateManagerImpl.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); @@ -93,8 +87,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } private Pipeline createDummyPipeline(int numNodes) { @@ -323,14 +315,13 @@ public void testAddAndGetContainer() throws IOException, TimeoutException { finalizePipeline(pipelineProto); removePipeline(pipelineProto); - try { - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueOf(++containerID)); - fail("Container should not have been added"); - } catch (IOException e) { - // Can not add a container to removed pipeline - assertThat(e.getMessage()).contains("not found"); - } + Pipeline finalPipeline = pipeline; + ContainerID cid = ContainerID.valueOf(++containerID); + IOException e = + assertThrows(IOException.class, + () -> stateManager.addContainerToPipeline(finalPipeline.getId(), cid)); + // Can not add a container to removed pipeline + assertThat(e.getMessage()).contains("not found"); } @Test @@ -344,13 +335,9 @@ public void testRemovePipeline() throws IOException, TimeoutException { stateManager .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1)); - try { - removePipeline(pipelineProto); - fail("Pipeline should not have been removed"); - } catch (IOException e) { - // can not remove a pipeline which already has containers - assertThat(e.getMessage()).contains("not yet closed"); - } + IOException e = assertThrows(IOException.class, () -> removePipeline(pipelineProto)); + // can not remove a pipeline which already has containers + assertThat(e.getMessage()).contains("not yet closed"); // close the pipeline finalizePipeline(pipelineProto); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index 977cf137fd8..5350c0da86e 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; @@ -41,10 +40,10 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.ClientVersion; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -67,10 +66,9 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** - * Test for RatisPipelineProvider. + * Test for {@link RatisPipelineProvider}. */ public class TestRatisPipelineProvider { @@ -80,6 +78,7 @@ public class TestRatisPipelineProvider { private MockNodeManager nodeManager; private RatisPipelineProvider provider; private PipelineStateManager stateManager; + @TempDir private File testDir; private DBStore dbStore; @@ -89,9 +88,11 @@ public void init(int maxPipelinePerNode) throws Exception { public void init(int maxPipelinePerNode, OzoneConfiguration conf) throws Exception { - testDir = GenericTestUtils.getTestDir( - TestRatisPipelineProvider.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + init(maxPipelinePerNode, conf, testDir); + } + + public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) throws Exception { + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); nodeManager = new MockNodeManager(true, 10); @@ -114,8 +115,6 @@ void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } private static void assertPipelineProperties( @@ -332,7 +331,7 @@ public void testFactorTHREEPipelineRackScatterEngagement() } @Test - public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { + public void testCreatePipelinesWhenNotEnoughSpace(@TempDir File tempDir) throws Exception { String expectedErrorSubstring = "Unable to find enough" + " nodes that meet the space requirement"; @@ -345,29 +344,23 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { if (factor == ReplicationFactor.ZERO) { continue; } - try { - provider.create(RatisReplicationConfig.getInstance(factor)); - fail("Expected SCMException for large container size with " + - "replication factor " + factor.toString()); - } catch (SCMException ex) { - assertThat(ex.getMessage()).contains(expectedErrorSubstring); - } + SCMException ex = + assertThrows(SCMException.class, () -> provider.create(RatisReplicationConfig.getInstance(factor)), + "Expected SCMException for large container size with replication factor " + factor.toString()); + assertThat(ex.getMessage()).contains(expectedErrorSubstring); } OzoneConfiguration largeMetadataConf = new OzoneConfiguration(); 
largeMetadataConf.set(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, "300TB"); - init(1, largeMetadataConf); + init(1, largeMetadataConf, tempDir); for (ReplicationFactor factor: ReplicationFactor.values()) { if (factor == ReplicationFactor.ZERO) { continue; } - try { - provider.create(RatisReplicationConfig.getInstance(factor)); - fail("Expected SCMException for large metadata size with " + - "replication factor " + factor.toString()); - } catch (SCMException ex) { - assertThat(ex.getMessage()).contains(expectedErrorSubstring); - } + SCMException ex = + assertThrows(SCMException.class, () -> provider.create(RatisReplicationConfig.getInstance(factor)), + "Expected SCMException for large metadata size with replication factor " + factor.toString()); + assertThat(ex.getMessage()).contains(expectedErrorSubstring); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java index bbb714debb8..b69ebedb04d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -34,16 +32,15 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -55,16 +52,14 @@ public class TestSimplePipelineProvider { private NodeManager nodeManager; private PipelineProvider provider; private PipelineStateManager stateManager; + @TempDir private File testDir; private DBStore dbStore; @BeforeEach public void init() throws Exception { nodeManager = new MockNodeManager(true, 10); - final OzoneConfiguration conf = SCMTestUtils.getConf(); - testDir = GenericTestUtils.getTestDir( - TestSimplePipelineProvider.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); @@ -82,8 +77,6 @@ public void cleanup() throws Exception { if (dbStore != null) { dbStore.close(); } - - FileUtil.fullyDelete(testDir); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index 54d2ffed828..4f86450d03e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -34,7 +34,11 @@ import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.NodeSchema; +import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig; +import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.CapacityPipelineChoosePolicy; import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.HealthyPipelineChoosePolicy; import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -54,8 +58,13 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.apache.hadoop.hdds.conf.StorageUnit.BYTES; +import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -84,7 +93,7 @@ public class TestWritableECContainerProvider { private OzoneConfiguration conf; private DBStore dbStore; private SCMHAManager scmhaManager; - private MockNodeManager nodeManager; + private static MockNodeManager nodeManager; private WritableContainerProvider provider; private ECReplicationConfig repConfig; @@ -93,8 +102,20 @@ public class TestWritableECContainerProvider { public static Collection policies() { Collection policies = new ArrayList<>(); + // init nodeManager + NodeSchemaManager.getInstance().init(new NodeSchema[] + {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, true); + NetworkTopologyImpl cluster = + new NetworkTopologyImpl(NodeSchemaManager.getInstance()); + int count = 10; + List datanodes = IntStream.range(0, count) + .mapToObj(i -> MockDatanodeDetails.randomDatanodeDetails()) + .collect(Collectors.toList()); + nodeManager = new MockNodeManager(cluster, datanodes, false, count); + policies.add(new RandomPipelineChoosePolicy()); policies.add(new HealthyPipelineChoosePolicy()); + policies.add(new CapacityPipelineChoosePolicy().init(nodeManager)); return policies; } @@ -110,7 +131,6 @@ void setup(@TempDir File testDir) throws IOException { dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); - nodeManager = new MockNodeManager(true, 10); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java new file mode 100644 index 00000000000..421d2396bfa --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor 
license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test for the capacity pipeline choose policy. + */ +public class TestCapacityPipelineChoosePolicy { + + @Test + public void testChoosePipeline() throws Exception { + + // given 4 datanode + List datanodes = new ArrayList<>(); + for (int i = 0; i < 4; i++) { + datanodes.add(MockDatanodeDetails.randomDatanodeDetails()); + } + // dn0 dn1 dn2 dn3 + // used 0 10 20 30 + NodeManager mockNodeManager = mock(NodeManager.class); + when(mockNodeManager.getNodeStat(datanodes.get(0))) + .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(1))) + .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0)); + + PipelineChoosePolicy policy = new CapacityPipelineChoosePolicy().init(mockNodeManager); + + // generate 4 pipelines, and every pipeline has 3 datanodes + // + // pipeline0 dn1 dn2 dn3 + // pipeline1 dn0 dn2 dn3 + // pipeline2 dn0 dn1 dn3 + // pipeline3 dn0 dn1 dn2 + // + // In the above scenario, pipeline0 vs pipeline1 runs through three rounds + // of comparisons, (dn3 <-> dn3) -> (dn2 <-> dn2 ) -> (dn1 <-> dn0), + // finally comparing dn0 and dn1, and dn0 wins, so pipeline1 is selected. 
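+    //
+    // Illustrative sketch (an assumption about how the capacity comparison
+    // behaves, not an API exercised by this test): order each pipeline's
+    // datanodes by space used, highest first:
+    //   pipeline0 -> [dn3(30), dn2(20), dn1(10)]
+    //   pipeline1 -> [dn3(30), dn2(20), dn0(0)]
+    // The first two positions tie; the last differing pair (dn1 vs dn0)
+    // favours dn0, so pipeline1 is preferred over pipeline0. The loop below
+    // only verifies the resulting relative selection counts.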
+ // + List pipelines = new ArrayList<>(); + for (int i = 0; i < 4; i++) { + List dns = new ArrayList<>(); + for (int j = 0; j < datanodes.size(); j++) { + if (i != j) { + dns.add(datanodes.get(j)); + } + } + Pipeline pipeline = MockPipeline.createPipeline(dns); + MockRatisPipelineProvider.markPipelineHealthy(pipeline); + pipelines.add(pipeline); + } + + Map selectedCount = new HashMap<>(); + for (Pipeline pipeline : pipelines) { + selectedCount.put(pipeline, 0); + } + for (int i = 0; i < 1000; i++) { + // choosePipeline + Pipeline pipeline = policy.choosePipeline(pipelines, null); + assertNotNull(pipeline); + selectedCount.put(pipeline, selectedCount.get(pipeline) + 1); + } + + // The selected count from most to least should be : + // pipeline3 > pipeline2 > pipeline1 > pipeline0 + assertThat(selectedCount.get(pipelines.get(3))).isGreaterThan(selectedCount.get(pipelines.get(2))); + assertThat(selectedCount.get(pipelines.get(2))).isGreaterThan(selectedCount.get(pipelines.get(1))); + assertThat(selectedCount.get(pipelines.get(1))).isGreaterThan(selectedCount.get(pipelines.get(0))); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java index 7d0a72ed2fb..82fed5953aa 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java @@ -21,7 +21,9 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; import org.apache.hadoop.hdds.scm.PipelineRequestInformation; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -42,17 +44,20 @@ public class TestPipelineChoosePolicyFactory { private ScmConfig scmConfig; + private NodeManager nodeManager; + @BeforeEach public void setup() { //initialize network topology instance conf = new OzoneConfiguration(); scmConfig = conf.getObject(ScmConfig.class); + nodeManager = new MockNodeManager(true, 5); } @Test public void testDefaultPolicy() throws IOException { PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, false); + .getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -60,7 +65,7 @@ public void testDefaultPolicy() throws IOException { @Test public void testDefaultPolicyEC() throws IOException { PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -69,7 +74,7 @@ public void testDefaultPolicyEC() throws IOException { public void testNonDefaultPolicyEC() throws IOException { scmConfig.setECPipelineChoosePolicyName(DummyGoodImpl.class.getName()); PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(nodeManager, scmConfig, true); assertSame(DummyGoodImpl.class, policy.getClass()); } @@ -121,10 +126,10 @@ public void 
testConstructorNotFound() throws SCMException { scmConfig.setPipelineChoosePolicyName(DummyImpl.class.getName()); scmConfig.setECPipelineChoosePolicyName(DummyImpl.class.getName()); PipelineChoosePolicy policy = - PipelineChoosePolicyFactory.getPolicy(scmConfig, false); + PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); - policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true); + policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -137,10 +142,10 @@ public void testClassNotImplemented() throws SCMException { scmConfig.setECPipelineChoosePolicyName( "org.apache.hadoop.hdds.scm.pipeline.choose.policy.HelloWorld"); PipelineChoosePolicy policy = - PipelineChoosePolicyFactory.getPolicy(scmConfig, false); + PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); - policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true); + policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 31cd2db1e55..98f16394902 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -23,9 +23,7 @@ import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; -import java.util.UUID; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -47,6 +45,7 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.LoggerFactory; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -56,6 +55,8 @@ * This class tests HealthyPipelineSafeMode rule. 
*/ public class TestHealthyPipelineSafeModeRule { + @TempDir + private File tempFile; @Test public void testHealthyPipelineSafeModeRuleWithNoPipelines() @@ -66,12 +67,9 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() List containers = new ArrayList<>(HddsTestUtils.getContainerInfo(1)); - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + - UUID.randomUUID()); OzoneConfiguration config = new OzoneConfiguration(); MockNodeManager nodeManager = new MockNodeManager(true, 0); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); @@ -106,14 +104,11 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() assertTrue(healthyPipelineSafeModeRule.validate()); } finally { scmMetadataStore.getStore().close(); - FileUtil.fullyDelete(new File(storageDir)); } } @Test public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID()); EventQueue eventQueue = new EventQueue(); SCMServiceManager serviceManager = new SCMServiceManager(); @@ -126,7 +121,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. MockNodeManager nodeManager = new MockNodeManager(true, 12); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); @@ -201,7 +196,6 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { 1000, 5000); } finally { scmMetadataStore.getStore().close(); - FileUtil.fullyDelete(new File(storageDir)); } } @@ -209,10 +203,6 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { @Test public void testHealthyPipelineSafeModeRuleWithMixedPipelines() throws Exception { - - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID()); - EventQueue eventQueue = new EventQueue(); SCMServiceManager serviceManager = new SCMServiceManager(); SCMContext scmContext = SCMContext.emptyContext(); @@ -225,7 +215,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. 
MockNodeManager nodeManager = new MockNodeManager(true, 12); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); @@ -308,7 +298,6 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() } finally { scmMetadataStore.getStore().close(); - FileUtil.fullyDelete(new File(storageDir)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 79adf009f00..319caabe40a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -19,17 +19,17 @@ import java.io.File; import java.io.IOException; -import java.nio.file.Path; import java.time.Clock; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.UUID; +import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.fs.FileUtil; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -85,9 +85,11 @@ public class TestSCMSafeModeManager { private List containers = Collections.emptyList(); private SCMMetadataStore scmMetadataStore; + @TempDir + private File tempDir; @BeforeEach - public void setUp(@TempDir Path tempDir) throws IOException { + public void setUp() throws IOException { queue = new EventQueue(); scmContext = SCMContext.emptyContext(); serviceManager = new SCMServiceManager(); @@ -95,7 +97,7 @@ public void setUp(@TempDir Path tempDir) throws IOException { config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempDir.toAbsolutePath().toString()); + tempDir.getAbsolutePath().toString()); scmMetadataStore = new SCMMetadataStoreImpl(config); } @@ -135,6 +137,7 @@ private void testSafeMode(int numContainers) throws Exception { serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); + validateRuleStatus("DatanodeSafeModeRule", "registered datanodes 0"); queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, HddsTestUtils.createNodeRegistrationContainerReport(containers)); @@ -176,7 +179,8 @@ public void testSafeModeExitRule() throws Exception { .getNumContainerWithOneReplicaReportedThreshold().value()); assertTrue(scmSafeModeManager.getInSafeMode()); - + validateRuleStatus("ContainerSafeModeRule", + "% of containers with at least one reported"); testContainerThreshold(containers.subList(0, 25), 0.25); assertEquals(25, scmSafeModeManager.getSafeModeMetrics() .getCurrentContainersWithOneReplicaReportedCount().value()); @@ -316,6 +320,13 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); + if (healthyPipelinePercent > 0) { + validateRuleStatus("HealthyPipelineSafeModeRule", + "healthy Ratis/THREE pipelines"); + } + validateRuleStatus("OneReplicaPipelineSafeModeRule", + "reported Ratis/THREE pipelines with at least one 
datanode"); + testContainerThreshold(containers, 1.0); List pipelines = pipelineManager.getPipelines(); @@ -374,6 +385,22 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( 100, 1000 * 5); } + /** + * @param safeModeRule verify that this rule is not satisfied + * @param stringToMatch string to match in the rule status. + */ + private void validateRuleStatus(String safeModeRule, String stringToMatch) { + Set>> ruleStatuses = + scmSafeModeManager.getRuleStatus().entrySet(); + for (Map.Entry> entry : ruleStatuses) { + if (entry.getKey().equals(safeModeRule)) { + Pair value = entry.getValue(); + assertEquals(false, value.getLeft()); + assertThat(value.getRight()).contains(stringToMatch); + } + } + } + private void checkHealthy(int expectedCount) throws Exception { GenericTestUtils.waitFor(() -> scmSafeModeManager .getHealthyPipelineSafeModeRule() @@ -528,11 +555,8 @@ private void testContainerThreshold(List dnContainers, public void testSafeModePipelineExitRule() throws Exception { containers = new ArrayList<>(); containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); - String storageDir = GenericTestUtils.getTempPath( - TestSCMSafeModeManager.class.getName() + UUID.randomUUID()); try { MockNodeManager nodeManager = new MockNodeManager(true, 3); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); // enable pipeline check config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); @@ -578,13 +602,11 @@ public void testSafeModePipelineExitRule() throws Exception { config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, false); - FileUtil.fullyDelete(new File(storageDir)); } } @Test - public void testPipelinesNotCreatedUntilPreCheckPasses() - throws Exception { + public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { int numOfDns = 5; // enable pipeline check config.setBoolean( @@ -594,12 +616,6 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() true); MockNodeManager nodeManager = new MockNodeManager(true, numOfDns); - String storageDir = GenericTestUtils.getTempPath( - TestSCMSafeModeManager.class.getName() + UUID.randomUUID()); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - // enable pipeline check - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); PipelineManagerImpl pipelineManager = PipelineManagerImpl.newPipelineManager( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java index 7089f68ec71..b82ce15a384 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.scm.security; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ha.SCMContext; @@ -40,6 +39,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -56,7 +56,7 @@ import java.util.concurrent.TimeoutException; import static 
org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ACK_TIMEOUT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_CHECK_INTERNAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ENABLED; @@ -66,7 +66,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_ROOTCA_CERTIFICATE_POLLING_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -94,6 +93,7 @@ public class TestRootCARotationManager { private SCMSecurityProtocolServer scmSecurityProtocolServer; private RootCARotationHandlerImpl handler; private StatefulServiceStateManager statefulServiceStateManager; + @TempDir private File testDir; private String cID = UUID.randomUUID().toString(); private String scmID = UUID.randomUUID().toString(); @@ -103,8 +103,6 @@ public class TestRootCARotationManager { public void init() throws IOException, TimeoutException, CertificateException { ozoneConfig = new OzoneConfiguration(); - testDir = GenericTestUtils.getTestDir( - TestRootCARotationManager.class.getSimpleName() + UUID.randomUUID()); ozoneConfig .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); ozoneConfig @@ -146,60 +144,37 @@ public void tearDown() throws Exception { if (rootCARotationManager != null) { rootCARotationManager.stop(); } - - FileUtil.fullyDelete(testDir); } @Test - public void testProperties() { + void testProperties() throws Exception { // invalid check interval ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28"); - try { - rootCARotationManager = new RootCARotationManager(scm); - fail("Should fail"); - } catch (Exception e) { - assertInstanceOf(DateTimeParseException.class, e); - } + assertThrows(DateTimeParseException.class, () -> rootCARotationManager = new RootCARotationManager(scm)); // check interval should be less than grace period ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28D"); - try { - rootCARotationManager = new RootCARotationManager(scm); - fail("Should fail"); - } catch (Exception e) { - assertInstanceOf(IllegalArgumentException.class, e); - assertThat(e.getMessage()).contains("should be smaller than"); - } + IllegalArgumentException ex = + assertThrows(IllegalArgumentException.class, () -> rootCARotationManager = new RootCARotationManager(scm)); + assertThat(ex.getMessage()).contains("should be smaller than"); // invalid time of day format ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P1D"); ozoneConfig.set(HDDS_X509_CA_ROTATION_TIME_OF_DAY, "01:00"); - try { - rootCARotationManager = new RootCARotationManager(scm); - fail("Should fail"); - } catch (Exception e) { - assertInstanceOf(IllegalArgumentException.class, e); - assertThat(e.getMessage()).contains("should follow the hh:mm:ss format"); - } + ex = assertThrows(IllegalArgumentException.class, () -> rootCARotationManager = new RootCARotationManager(scm)); + assertThat(ex.getMessage()).contains("should follow the hh:mm:ss format"); // valid properties ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P1D"); ozoneConfig.set(HDDS_X509_CA_ROTATION_TIME_OF_DAY, "01:00:00"); - 
try { - rootCARotationManager = new RootCARotationManager(scm); - } catch (Exception e) { - fail("Should succeed"); - } + rootCARotationManager = new RootCARotationManager(scm); // invalid property value is ignored when auto rotation is disabled. ozoneConfig.setBoolean(HDDS_X509_CA_ROTATION_ENABLED, false); ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28D"); - try { - rootCARotationManager = new RootCARotationManager(scm); - } catch (Exception e) { - fail("Should succeed"); - } + + rootCARotationManager = new RootCARotationManager(scm); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 79be275788a..7c06b79a2ff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -31,6 +31,9 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; import java.io.IOException; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; @@ -50,8 +53,8 @@ public class TestSCMClientProtocolServer { private StorageContainerLocationProtocolServerSideTranslatorPB service; @BeforeEach - void setUp() throws Exception { - config = SCMTestUtils.getConf(); + void setUp(@TempDir File testDir) throws Exception { + config = SCMTestUtils.getConf(testDir); SCMConfigurator configurator = new SCMConfigurator(); configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true)); configurator.setScmContext(SCMContext.emptyContext()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 46cd784c47e..58f65df8fd8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -100,6 +100,9 @@ public class TestEndPoint { private static DatanodeLayoutStorage layoutStorage; private static DatanodeDetails dnDetails; + @TempDir + private File tempDir; + @AfterAll public static void tearDown() throws Exception { if (scmServer != null) { @@ -110,7 +113,7 @@ public static void tearDown() throws Exception { @BeforeAll static void setUp() throws Exception { serverAddress = SCMTestUtils.getReuseableAddress(); - ozoneConf = SCMTestUtils.getConf(); + ozoneConf = SCMTestUtils.getConf(testDir); scmServerImpl = new ScmTestMock(); dnDetails = randomDatanodeDetails(); layoutStorage = new DatanodeLayoutStorage(ozoneConf, @@ -128,7 +131,7 @@ static void setUp() throws Exception { @Test public void testGetVersion() throws Exception { try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), + createEndpoint(SCMTestUtils.getConf(tempDir), serverAddress, 1000)) { SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint() .getVersion(null); @@ -316,7 +319,7 @@ public void testDnLayoutVersionFile() throws Exception { */ @Test public void testGetVersionToInvalidEndpoint() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(tempDir); 
InetSocketAddress nonExistentServerAddress = SCMTestUtils .getReuseableAddress(); try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, @@ -344,7 +347,7 @@ public void testGetVersionToInvalidEndpoint() throws Exception { public void testGetVersionAssertRpcTimeOut() throws Exception { final long rpcTimeout = 1000; final long tolerance = 100; - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(tempDir); try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, serverAddress, (int) rpcTimeout)) { @@ -369,7 +372,7 @@ public void testGetVersionAssertRpcTimeOut() throws Exception { public void testRegister() throws Exception { DatanodeDetails nodeToRegister = randomDatanodeDetails(); try (EndpointStateMachine rpcEndPoint = createEndpoint( - SCMTestUtils.getConf(), serverAddress, 1000)) { + SCMTestUtils.getConf(tempDir), serverAddress, 1000)) { SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() .register(nodeToRegister.getExtendedProtoBufMessage(), HddsTestUtils .createNodeReport( @@ -403,7 +406,7 @@ private MetadataStorageReportProto getMetadataStorageReports(UUID id) { private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, int rpcTimeout, boolean clearDatanodeDetails ) throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(tempDir); EndpointStateMachine rpcEndPoint = createEndpoint(conf, scmAddress, rpcTimeout); @@ -481,7 +484,7 @@ public void testRegisterRpcTimeout() throws Exception { public void testHeartbeat() throws Exception { DatanodeDetails dataNode = randomDatanodeDetails(); try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), + createEndpoint(SCMTestUtils.getConf(tempDir), serverAddress, 1000)) { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) @@ -501,7 +504,7 @@ public void testHeartbeat() throws Exception { public void testHeartbeatWithCommandStatusReport() throws Exception { DatanodeDetails dataNode = randomDatanodeDetails(); try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), + createEndpoint(SCMTestUtils.getConf(tempDir), serverAddress, 1000)) { // Add some scmCommands for heartbeat response addScmCommands(); @@ -572,7 +575,7 @@ private StateContext heartbeatTaskHelper( InetSocketAddress scmAddress, int rpcTimeout ) throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(tempDir); // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 0a865043356..92a6fd455d8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -371,13 +371,11 @@ public RegisteredCommand register(DatanodeDetails dd, * Send heartbeat to indicate the datanode is alive and doing well. * * @param dd - Datanode Details. 
- * @param layoutInfo - Layout Version Proto * @param commandQueueReportProto - Command Queue Report Proto * @return SCMheartbeat response list */ @Override public List processHeartbeat(DatanodeDetails dd, - LayoutVersionProto layoutInfo, CommandQueueReportProto commandQueueReportProto) { return null; } diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 4f78bd1f14d..ea3c94b65a6 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging commons-logging - - junit - junit - org.junit.jupiter junit-jupiter-api @@ -66,6 +62,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ch.qos.reload4j reload4j + + jakarta.annotation + jakarta.annotation-api + org.apache.commons commons-lang3 diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java deleted file mode 100644 index cddbbd18080..00000000000 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ozone.test; - -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import java.util.Objects; - -/** - * Disables the delegate rule if the given system property matches a specific - * value. - */ -public class DisableOnProperty implements TestRule { - - private final TestRule delegate; - private final boolean enabled; - - public DisableOnProperty(TestRule delegate, String key, String value) { - this.delegate = delegate; - enabled = !Objects.equals(value, System.getProperty(key, "")); - } - - @Override - public Statement apply(Statement base, Description description) { - return enabled ? delegate.apply(base, description) : base; - } -} diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 406a58768a8..c9fa668445d 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -24,6 +24,9 @@ import java.io.PrintStream; import java.io.StringWriter; import java.io.UnsupportedEncodingException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.time.Instant; import java.util.List; import java.util.Map; import java.util.concurrent.TimeoutException; @@ -73,6 +76,18 @@ public abstract class GenericTestUtils { "target" + File.separator + "test" + File.separator + "data"; } + /** + * Return current time in millis as an {@code Instant}. This may be + * before {@link Instant#now()}, since the latter includes nanoseconds, too. + * This is needed for some tests that verify volume/bucket creation time, + * which also uses {@link Instant#ofEpochMilli(long)}. + * + * @return current time as {@code Instant}; + */ + public static Instant getTestStartTime() { + return Instant.ofEpochMilli(System.currentTimeMillis()); + } + /** * Get the (created) base directory for tests. 
* @@ -203,17 +218,13 @@ public static void setLogLevel(org.slf4j.Logger logger, setLogLevel(toLog4j(logger), Level.toLevel(level.toString())); } - public static void setRootLogLevel(org.slf4j.event.Level level) { - setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString())); - } - public static T mockFieldReflection(Object object, String fieldName) throws NoSuchFieldException, IllegalAccessException { Field field = object.getClass().getDeclaredField(fieldName); boolean isAccessible = field.isAccessible(); field.setAccessible(true); - Field modifiersField = Field.class.getDeclaredField("modifiers"); + Field modifiersField = ReflectionUtils.getModifiersField(); boolean modifierFieldAccessible = modifiersField.isAccessible(); modifiersField.setAccessible(true); int modifierVal = modifiersField.getInt(field); @@ -233,7 +244,7 @@ public static T getFieldReflection(Object object, String fieldName) boolean isAccessible = field.isAccessible(); field.setAccessible(true); - Field modifiersField = Field.class.getDeclaredField("modifiers"); + Field modifiersField = ReflectionUtils.getModifiersField(); boolean modifierFieldAccessible = modifiersField.isAccessible(); modifiersField.setAccessible(true); int modifierVal = modifiersField.getInt(field); @@ -455,4 +466,45 @@ public static String anyHostWithFreePort() { } } + /** + * This class is a utility class for java reflection operations. + */ + public static final class ReflectionUtils { + + /** + * This method provides the modifiers field using reflection approach which is compatible + * for both pre Java 9 and post java 9 versions. + * @return modifiers field + * @throws IllegalAccessException + * @throws NoSuchFieldException + */ + public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException { + Field modifiersField = null; + try { + modifiersField = Field.class.getDeclaredField("modifiers"); + } catch (NoSuchFieldException e) { + try { + Method getDeclaredFields0 = Class.class.getDeclaredMethod( + "getDeclaredFields0", boolean.class); + boolean accessibleBeforeSet = getDeclaredFields0.isAccessible(); + getDeclaredFields0.setAccessible(true); + Field[] fields = (Field[]) getDeclaredFields0.invoke(Field.class, false); + getDeclaredFields0.setAccessible(accessibleBeforeSet); + for (Field field : fields) { + if ("modifiers".equals(field.getName())) { + modifiersField = field; + break; + } + } + if (modifiersField == null) { + throw e; + } + } catch (InvocationTargetException | NoSuchMethodException ex) { + e.addSuppressed(ex); + throw e; + } + } + return modifiersField; + } + } } diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java index 28d3b936eca..83cc8465169 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.metrics2.lib.Interns.info; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.AdditionalMatchers.geq; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -47,6 +47,8 @@ /** * Helpers for metrics source tests. 
+ *
+ * Copied from Hadoop and migrated to AssertJ. */ public final class MetricsAsserts { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java similarity index 60% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java rename to hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java index 58e79ef05e7..bb675bddafd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java @@ -6,42 +6,39 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
+ * * http://www.apache.org/licenses/LICENSE-2.0 - *
+ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdds.function; +package org.apache.ozone.test; -import java.util.function.BiPredicate; -import java.util.function.Predicate; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; + +import java.lang.reflect.Method; /** - * Common predicates. + * Base class for Ozone JUnit tests. + * Provides test method name, which can be used to create unique items. */ -public final class Predicates { +public abstract class OzoneTestBase { - public static Predicate yes() { - return x -> true; - } + private TestInfo info; - public static Predicate no() { - return x -> false; + @BeforeEach + void storeTestInfo(TestInfo testInfo) { + this.info = testInfo; } - public static BiPredicate yesBi() { - return (t, u) -> true; + protected String getTestName() { + return info.getTestMethod() + .map(Method::getName) + .orElse("unknown"); } - public static BiPredicate noBi() { - return (t, u) -> false; - } - - private Predicates() { - // no instances - } } diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java index 390d69a083c..e27776c9e98 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java @@ -35,7 +35,7 @@ import org.junit.platform.launcher.TestExecutionListener; import org.junit.platform.launcher.TestIdentifier; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; /** * JUnit test execution listener which prints full thread dump to System.err diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 665b56d3ab0..5122f1d4a45 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 7aa91cec73c..d07e696e7ef 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -59,6 +59,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT; @@ -215,6 +216,11 @@ public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, } } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + return storageContainerLocationClient.getContainersOnDecomNode(dn); + } + @Override public List queryNode( HddsProtos.NodeOperationalState opState, @@ -225,6 +231,11 @@ public List queryNode( queryScope, poolName, ClientVersion.CURRENT_VERSION); } + @Override + public HddsProtos.Node queryNode(UUID uuid) throws IOException { + return 
storageContainerLocationClient.queryNode(uuid); + } + @Override public List decommissionNodes(List hosts) throws IOException { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java index 554316c2e92..7ef34236bf2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java @@ -52,6 +52,10 @@ public class ReportSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { ReplicationManagerReport report = scmClient.getReplicationManagerReport(); + if (report.getReportTimeStamp() == 0) { + System.err.println("The Container Report is not available until Replication Manager completes" + + " its first run after startup or fail over. All values will be zero until that time.\n"); + } if (json) { output(JsonUtils.toJsonStringWithDefaultPrettyPrinter(report)); @@ -68,9 +72,11 @@ public void execute(ScmClient scmClient) throws IOException { } private void outputHeader(long epochMs) { + if (epochMs == 0) { + epochMs = Instant.now().toEpochMilli(); + } Instant reportTime = Instant.ofEpochSecond(epochMs / 1000); outputHeading("Container Summary Report generated at " + reportTime); - } private void outputContainerStats(ReplicationManagerReport report) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index bbf1d840760..b53632f8eec 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -23,10 +23,12 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; import picocli.CommandLine; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -83,6 +85,8 @@ public void execute(ScmClient scmClient) throws IOException { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); printDetails(datanode); + Map> containers = scmClient.getContainersOnDecomNode(datanode); + System.out.println(containers); } } private void printDetails(DatanodeDetails datanode) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 23ff9176df9..e7d3a444383 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Decommission one or more datanodes. 
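Note (not part of the patch): the DecommissionStatusSubCommand change above prints the map returned by the new ScmClient#getContainersOnDecomNode call as-is. A minimal, self-contained sketch of the shape that output takes, assuming the keys are container health states such as UnderReplicated / UnClosed (mirroring the test fixture added further below); the class name and the plain Long IDs here are illustrative only, the real API returns Map<String, List<ContainerID>>:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the decommission-status output added above.
// Longs stand in for ContainerID only to keep the example self-contained.
public final class DecomContainerMapExample {
  public static void main(String[] args) {
    Map<String, List<Long>> containers = new HashMap<>();
    containers.put("UnderReplicated", Arrays.asList(1L, 2L, 3L));
    containers.put("UnClosed", Arrays.asList(10L, 11L, 12L));
    // DecommissionStatusSubCommand simply prints the map after the
    // per-datanode details, producing something like
    // {UnderReplicated=[1, 2, 3], UnClosed=[10, 11, 12]}
    // (exact element formatting depends on ContainerID#toString).
    System.out.println(containers);
  }
}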
@@ -41,12 +42,26 @@ public class DecommissionSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. " + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.decommissionNodes(hosts); System.out.println("Started decommissioning datanode(s):\n" + String.join("\n", hosts)); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java index db12ee2aacb..325e362d4f4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -82,6 +83,15 @@ public class ListInfoSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { pipelines = scmClient.listPipelines(); + if (!Strings.isNullOrEmpty(uuid)) { + HddsProtos.Node node = scmClient.queryNode(UUID.fromString(uuid)); + DatanodeWithAttributes dwa = new DatanodeWithAttributes(DatanodeDetails + .getFromProtoBuf(node.getNodeID()), + node.getNodeOperationalStates(0), + node.getNodeStates(0)); + printDatanodeInfo(dwa); + return; + } Stream allNodes = getAllNodes(scmClient).stream(); if (!Strings.isNullOrEmpty(ipaddress)) { allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress() @@ -91,10 +101,6 @@ public void execute(ScmClient scmClient) throws IOException { allNodes = allNodes.filter(p -> p.getDatanodeDetails().getHostName() .compareToIgnoreCase(hostname) == 0); } - if (!Strings.isNullOrEmpty(uuid)) { - allNodes = allNodes.filter(p -> - p.getDatanodeDetails().getUuidString().equals(uuid)); - } if (!Strings.isNullOrEmpty(nodeOperationalState)) { allNodes = allNodes.filter(p -> p.getOpState().toString() .compareToIgnoreCase(nodeOperationalState) == 0); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index a64c400f66f..82d263b416f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Place one or more datanodes into Maintenance Mode. 
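Note (not part of the patch): the same stdin handling added to DecommissionSubCommand above is repeated for the Maintenance and Recommission subcommands below. A minimal standalone sketch of that pattern; the helper class and the CLI invocation in the comment are assumed examples, not code from the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;

// Sketch of the new host-list handling: a single "-" parameter makes the
// subcommand read one host name per line from stdin, e.g. (illustrative):
//   printf 'host1\nhost2\n' | ozone admin datanode decommission -
public final class HostListParsingExample {

  static List<String> resolveHosts(List<String> parameters) {
    if (!parameters.isEmpty() && "-".equals(parameters.get(0))) {
      List<String> hosts = new ArrayList<>();
      Scanner scanner = new Scanner(System.in, "UTF-8"); // same charset as in the patch
      while (scanner.hasNextLine()) {
        hosts.add(scanner.nextLine().trim());
      }
      return hosts;
    }
    return parameters; // hosts were given directly as CLI arguments
  }

  private HostListParsingExample() { }
}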
@@ -41,8 +42,11 @@ public class MaintenanceSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. " + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @CommandLine.Option(names = {"--end"}, description = "Automatically end maintenance after the given hours. " + @@ -51,7 +55,18 @@ public class MaintenanceSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.startMaintenanceNodes(hosts, endInHours); System.out.println("Entering maintenance mode on datanode(s):\n" + diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index 61f7826cf64..e21d61ed3d7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Recommission one or more datanodes. @@ -42,12 +43,26 @@ public class RecommissionSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. 
" + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.recommissionNodes(hosts); System.out.println("Started recommissioning datanode(s):\n" + String.join("\n", hosts)); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java index c0950e0143f..d8c1addb78e 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java @@ -56,10 +56,10 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.any; /** * Tests for InfoSubCommand class. diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java index 58eeaee3d28..87d88617e78 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java @@ -74,18 +74,20 @@ public void testCorrectValuesAppearInEmptyReport() throws IOException { cmd.execute(scmClient); + Pattern p = Pattern.compile("^The Container Report is not available until Replication Manager completes.*"); + Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { - Pattern p = Pattern.compile( - "^" + state.toString() + ": 0$", Pattern.MULTILINE); - Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + p = Pattern.compile("^" + state.toString() + ": 0$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } for (ReplicationManagerReport.HealthState state : ReplicationManagerReport.HealthState.values()) { - Pattern p = Pattern.compile( - "^" + state.toString() + ": 0$", Pattern.MULTILINE); - Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + p = Pattern.compile("^" + state.toString() + ": 0$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } } @@ -101,6 +103,10 @@ public void testValidJsonOutput() throws IOException { c.parseArgs("--json"); cmd.execute(scmClient); + Pattern p = Pattern.compile("^The Container Report is not available until Replication Manager completes.*"); + Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); + 
assertTrue(m.find()); + ObjectMapper mapper = new ObjectMapper(); JsonNode json = mapper.readTree(outContent.toString("UTF-8")); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 33c01e4abd9..3be931c1321 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.cli.container.upgrade; import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -45,10 +44,9 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -66,8 +64,8 @@ import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -78,6 +76,7 @@ public class TestUpgradeManager { private static final String SCM_ID = UUID.randomUUID().toString(); private static final OzoneConfiguration CONF = new OzoneConfiguration(); + @TempDir private File testRoot; private MutableVolumeSet volumeSet; private UUID datanodeId; @@ -93,12 +92,6 @@ public void setup() throws Exception { dc.setContainerSchemaV3Enabled(true); CONF.setFromObject(dc); - testRoot = - GenericTestUtils.getTestDir(TestUpgradeManager.class.getSimpleName()); - if (testRoot.exists()) { - FileUtils.cleanDirectory(testRoot); - } - final File volume1Path = new File(testRoot, "volume1"); final File volume2Path = new File(testRoot, "volume2"); @@ -142,11 +135,6 @@ public void setup() throws Exception { chunkManager = new FilePerBlockStrategy(true, blockManager, null); } - @AfterEach - public void after() throws IOException { - FileUtils.deleteDirectory(testRoot); - } - @Test public void testUpgrade() throws IOException { int num = 2; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 902ee5e7a8d..41c31caf1f0 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; 
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -29,7 +31,9 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -38,7 +42,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -55,6 +59,7 @@ public class TestDecommissionStatusSubCommand { private final PrintStream originalErr = System.err; private DecommissionStatusSubCommand cmd; private List nodes = getNodeDetails(2); + private Map> containerOnDecom = getContainersOnDecomNodes(); @BeforeEach public void setup() throws UnsupportedEncodingException { @@ -74,6 +79,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -85,9 +91,15 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); } @Test @@ -96,6 +108,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { // No nodes in decommissioning. 
No error is printed when(scmClient.queryNode(any(), any(), any(), any())) .thenReturn(new ArrayList<>()); + when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>()); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -117,6 +130,7 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); @@ -125,11 +139,17 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); // as uuid of only host0 is passed, host1 should NOT be displayed p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertFalse(m.find()); } @Test @@ -137,6 +157,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID()))) + .thenReturn(containerOnDecom); + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) + .thenReturn(new HashMap<>()); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(1).getNodeID().getUuid()); @@ -161,6 +185,7 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -169,11 +194,17 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); // as IpAddress of only host1 is passed, host0 should NOT be displayed p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertFalse(m.find()); } @Test @@ -181,6 +212,10 @@ public void testIpOptionDecommissionStatusFail() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), 
any())) .thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID()))) + .thenReturn(containerOnDecom); + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) + .thenReturn(new HashMap<>()); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -225,4 +260,19 @@ private List getNodeDetails(int n) { return nodesList; } + private Map> getContainersOnDecomNodes() { + Map> containerMap = new HashMap<>(); + List underReplicated = new ArrayList<>(); + underReplicated.add(new ContainerID(1L)); + underReplicated.add(new ContainerID(2L)); + underReplicated.add(new ContainerID(3L)); + containerMap.put("UnderReplicated", underReplicated); + List unclosed = new ArrayList<>(); + unclosed.add(new ContainerID(10L)); + unclosed.add(new ContainerID(11L)); + unclosed.add(new ContainerID(12L)); + containerMap.put("UnClosed", unclosed); + return containerMap; + } + } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java index 7e5b857d179..e7e01ffaa1a 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -34,8 +35,8 @@ import picocli.CommandLine; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -47,6 +48,7 @@ public class TestDecommissionSubCommand { private DecommissionSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -56,6 +58,7 @@ public class TestDecommissionSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new DecommissionSubCommand(); + scmClient = mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -66,9 +69,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); 
+ assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenDecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.decommissionNodes(anyList())) .thenAnswer(invocation -> new ArrayList()); @@ -92,7 +123,6 @@ public void testNoErrorsWhenDecommissioning() throws IOException { @Test public void testErrorsReportedWhenDecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.decommissionNodes(anyList())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); @@ -102,12 +132,7 @@ public void testErrorsReportedWhenDecommissioning() throws IOException { CommandLine c = new CommandLine(cmd); c.parseArgs("host1", "host2"); - try { - cmd.execute(scmClient); - fail("Should not succeed without an exception"); - } catch (IOException e) { - // Expected - } + assertThrows(IOException.class, () -> cmd.execute(scmClient)); Pattern p = Pattern.compile( "^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java index b6ae0a8ff4f..1247b783b5c 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java @@ -32,6 +32,7 @@ import java.util.UUID; import java.util.regex.Matcher; import java.util.regex.Pattern; +import picocli.CommandLine; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; @@ -101,6 +102,32 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() assertTrue(m.find()); } + @Test + public void testDataNodeByUuidOutput() + throws Exception { + List nodes = getNodeDetails(); + + ScmClient scmClient = mock(ScmClient.class); + when(scmClient.queryNode(any())) + .thenAnswer(invocation -> nodes.get(0)); + when(scmClient.listPipelines()) + .thenReturn(new ArrayList<>()); + + CommandLine c = new CommandLine(cmd); + c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Operational State:\\s+IN_SERVICE$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile(nodes.get(0).getNodeID().getUuid().toString(), + Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + private List getNodeDetails() { List nodes = new ArrayList<>(); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java index d3f7f026ddb..d2a4c54b8bf 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; 
import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -34,8 +35,8 @@ import picocli.CommandLine; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; @@ -48,6 +49,7 @@ public class TestMaintenanceSubCommand { private MaintenanceSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -57,6 +59,7 @@ public class TestMaintenanceSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new MaintenanceSubCommand(); + scmClient = mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -67,9 +70,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenEnteringMaintenance() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.startMaintenanceNodes(anyList(), anyInt())) .thenAnswer(invocation -> new ArrayList()); @@ -94,7 +125,6 @@ public void testNoErrorsWhenEnteringMaintenance() throws IOException { @Test public void testErrorsReportedWhenEnteringMaintenance() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.startMaintenanceNodes(anyList(), anyInt())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); @@ -104,12 +134,7 @@ public void testErrorsReportedWhenEnteringMaintenance() throws IOException { CommandLine c = new CommandLine(cmd); c.parseArgs("host1", "host2"); - try { - cmd.execute(scmClient); - fail("Should not succeed without an exception"); - } catch (IOException e) { - // Expected - } + assertThrows(IOException.class, () -> cmd.execute(scmClient)); Pattern p = Pattern.compile( "^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)", diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java index 41ce0d90cb7..e274cd4fd54 100644 --- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -34,8 +35,8 @@ import picocli.CommandLine; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -47,6 +48,7 @@ public class TestRecommissionSubCommand { private RecommissionSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -56,6 +58,7 @@ public class TestRecommissionSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new RecommissionSubCommand(); + scmClient = mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -66,9 +69,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenRecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.recommissionNodes(anyList())) .thenAnswer(invocation -> new ArrayList()); @@ -92,7 +123,6 @@ public void testNoErrorsWhenRecommissioning() throws IOException { @Test public void testErrorsReportedWhenRecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.recommissionNodes(anyList())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); @@ -102,12 +132,7 @@ public void testErrorsReportedWhenRecommissioning() throws IOException { CommandLine c = new CommandLine(cmd); c.parseArgs("host1", "host2"); - try { - cmd.execute(scmClient); - fail("Should not succeed without an exception"); - } catch (IOException e) { - // Expected - } + assertThrows(IOException.class, () -> cmd.execute(scmClient)); Pattern p = Pattern.compile( "^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index db777e4396e..09f6621735e 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -37,9 +37,9 @@ import java.util.List; import static com.fasterxml.jackson.databind.node.JsonNodeType.ARRAY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; @@ -111,20 +111,20 @@ public void testOutputDataFieldsAligning() throws IOException { // then String output = outContent.toString(CharEncoding.UTF_8); - assertTrue(output.contains("UUID :")); - assertTrue(output.contains("IP Address :")); - assertTrue(output.contains("Hostname :")); - assertTrue(output.contains("Capacity :")); - assertTrue(output.contains("Total Used :")); - assertTrue(output.contains("Total Used % :")); - assertTrue(output.contains("Ozone Used :")); - assertTrue(output.contains("Ozone Used % :")); - assertTrue(output.contains("Remaining :")); - assertTrue(output.contains("Remaining % :")); - assertTrue(output.contains("Container(s) :")); - assertTrue(output.contains("Container Pre-allocated :")); - assertTrue(output.contains("Remaining Allocatable :")); - assertTrue(output.contains("Free Space To Spare :")); + assertThat(output).contains("UUID :"); + assertThat(output).contains("IP Address :"); + assertThat(output).contains("Hostname :"); + assertThat(output).contains("Capacity :"); + assertThat(output).contains("Total Used :"); + assertThat(output).contains("Total Used % :"); + assertThat(output).contains("Ozone Used :"); + assertThat(output).contains("Ozone Used % :"); + assertThat(output).contains("Remaining :"); + assertThat(output).contains("Remaining % :"); + assertThat(output).contains("Container(s) :"); + assertThat(output).contains("Container Pre-allocated :"); + assertThat(output).contains("Remaining Allocatable :"); + assertThat(output).contains("Free Space To Spare :"); } private List getUsageProto() { diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index a5598311c4c..a5a43643618 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Client jar - false diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 441d9143b59..ca885b3b6b0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -413,6 +413,12 @@ public void setListCacheSize(int listCacheSize) { this.listCacheSize = listCacheSize; } + @Deprecated + public void setEncryptionKey(String bekName) throws IOException { + proxy.setEncryptionKey(volumeName, name, bekName); + encryptionKeyName = bekName; + } + /** * Creates a new key in the bucket, with default replication type RATIS and * with replication factor 
THREE. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 5316f7a99e9..46e7e20b51b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Map; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -997,6 +997,24 @@ void setBucketQuota(String volumeName, String bucketName, void setReplicationConfig(String volumeName, String bucketName, ReplicationConfig replicationConfig) throws IOException; + /** + * Set Bucket Encryption Key (BEK). + * + * @param volumeName + * @param bucketName + * @param bekName + * @throws IOException + * @deprecated This functionality is deprecated as it is not intended for + * users to reset bucket encryption under normal circumstances and may be + * removed in the future. Users are advised to exercise caution and consider + * alternative approaches for managing bucket encryption unless HDDS-7449 or + * HDDS-7526 is encountered. As a result, the setter methods for this + * functionality have been marked as deprecated. + */ + @Deprecated + void setEncryptionKey(String volumeName, String bucketName, + String bekName) throws IOException; + /** * Returns OzoneKey that contains the application generated/visible * metadata for an Ozone Object. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 850ae0d1937..7e1e6fe4560 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -25,7 +25,7 @@ import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import org.apache.commons.lang3.StringUtils; @@ -145,7 +145,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ratis.protocol.ClientId; -import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -364,7 +363,7 @@ static boolean validateOmVersion(OzoneManagerVersion minimumVersion, return found; } - @NotNull + @Nonnull @VisibleForTesting protected XceiverClientFactory createXceiverClientFactory( ServiceInfoEx serviceInfo) throws IOException { @@ -1213,6 +1212,22 @@ public void setBucketQuota(String volumeName, String bucketName, } + @Deprecated + @Override + public void setEncryptionKey(String volumeName, String bucketName, + String bekName) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); + BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder() + .setKeyName(bekName).build(); + builder.setVolumeName(volumeName) + .setBucketName(bucketName) + 
.setBucketEncryptionKey(bek); + OmBucketArgs finalArgs = builder.build(); + ozoneManagerClient.setBucketProperty(finalArgs); + } + @Override public void setReplicationConfig( String volumeName, String bucketName, ReplicationConfig replicationConfig) @@ -1640,7 +1655,7 @@ public OzoneKeyDetails getKeyDetails( return getOzoneKeyDetails(keyInfo); } - @NotNull + @Nonnull private OzoneKeyDetails getOzoneKeyDetails(OmKeyInfo keyInfo) { List ozoneKeyLocations = new ArrayList<>(); long lastKeyOffset = 0L; @@ -1684,7 +1699,7 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, return getOzoneKeyDetails(keyInfo); } - @NotNull + @Nonnull private OmKeyInfo getS3KeyInfo( String bucketName, String keyName, boolean isHeadOp) throws IOException { verifyBucketName(bucketName); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java index e4a8a80a631..31f5e20bc88 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java @@ -56,6 +56,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashMap; @@ -67,6 +69,8 @@ * OM transport for testing with in-memory state. */ public class MockOmTransport implements OmTransport { + private static final Logger LOG = + LoggerFactory.getLogger(MockOmTransport.class); private final MockBlockAllocator blockAllocator; //volumename -> volumeinfo @@ -185,11 +189,44 @@ private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request) { .build(); } + private boolean isHSync(CommitKeyRequest commitKeyRequest) { + return commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); + } + + private boolean isRecovery(CommitKeyRequest commitKeyRequest) { + return commitKeyRequest.hasRecovery() && commitKeyRequest.getRecovery(); + } + + private String toOperationString(CommitKeyRequest commitKeyRequest) { + boolean hsync = isHSync(commitKeyRequest); + boolean recovery = isRecovery(commitKeyRequest); + if (hsync) { + return "hsync"; + } + if (recovery) { + return "recover"; + } + return "commit"; + } + + private CommitKeyResponse commitKey(CommitKeyRequest commitKeyRequest) { final KeyArgs keyArgs = commitKeyRequest.getKeyArgs(); final KeyInfo openKey = openKeys.get(keyArgs.getVolumeName()).get(keyArgs.getBucketName()) - .remove(keyArgs.getKeyName()); + .get(keyArgs.getKeyName()); + LOG.debug("{} open key vol: {} bucket: {} key: {}", + toOperationString(commitKeyRequest), + keyArgs.getVolumeName(), + keyArgs.getBucketName(), + keyArgs.getKeyName()); + boolean hsync = isHSync(commitKeyRequest); + if (!hsync) { + KeyInfo deleteKey = openKeys.get(keyArgs.getVolumeName()) + .get(keyArgs.getBucketName()) + .remove(keyArgs.getKeyName()); + assert deleteKey != null; + } final KeyInfo.Builder committedKeyInfoWithLocations = KeyInfo.newBuilder().setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java new file mode 100644 index 00000000000..1014b943a2a --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashMap; +import java.util.UUID; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.InMemoryConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.om.protocolPB.OmTransport; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; + + +/** + * Verify BlockOutputStream with incremental PutBlock feature. 
+ * (ozone.client.incremental.chunk.list = true) + */ +public class TestBlockOutputStreamIncrementalPutBlock { + private OzoneClient client; + private final String keyName = UUID.randomUUID().toString(); + private final String volumeName = UUID.randomUUID().toString(); + private final String bucketName = UUID.randomUUID().toString(); + private OzoneBucket bucket; + private final ConfigurationSource config = new InMemoryConfiguration(); + + public static Iterable parameters() { + return Arrays.asList(true, false); + } + + private void init(boolean incrementalChunkList) throws IOException { + OzoneClientConfig clientConfig = config.getObject(OzoneClientConfig.class); + + clientConfig.setIncrementalChunkList(incrementalChunkList); + clientConfig.setChecksumType(ContainerProtos.ChecksumType.CRC32C); + + ((InMemoryConfiguration)config).setFromObject(clientConfig); + + ((InMemoryConfiguration) config).setBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + ((InMemoryConfiguration) config).setBoolean( + OZONE_CHUNK_LIST_INCREMENTAL, incrementalChunkList); + + RpcClient rpcClient = new RpcClient(config, null) { + + @Override + protected OmTransport createOmTransport( + String omServiceId) + throws IOException { + return new MockOmTransport(); + } + + @Nonnull + @Override + protected XceiverClientFactory createXceiverClientFactory( + ServiceInfoEx serviceInfo) throws IOException { + return new MockXceiverClientFactory(); + } + }; + + client = new OzoneClient(config, rpcClient); + ObjectStore store = client.getObjectStore(); + + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + bucket = volume.getBucket(bucketName); + } + + @AfterEach + public void close() throws IOException { + client.close(); + } + + @ParameterizedTest + @MethodSource("parameters") + public void writeSmallChunk(boolean incrementalChunkList) + throws IOException { + init(incrementalChunkList); + + int size = 1024; + String s = RandomStringUtils.randomAlphabetic(1024); + ByteBuffer byteBuffer = ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)); + + try (OzoneOutputStream out = bucket.createKey(keyName, size, + ReplicationConfig.getDefault(config), new HashMap<>())) { + for (int i = 0; i < 4097; i++) { + out.write(byteBuffer); + out.hsync(); + } + } + + try (OzoneInputStream is = bucket.readKey(keyName)) { + ByteBuffer readBuffer = ByteBuffer.allocate(size); + for (int i = 0; i < 4097; i++) { + is.read(readBuffer); + assertArrayEquals(readBuffer.array(), byteBuffer.array()); + } + } + } + + @ParameterizedTest + @MethodSource("parameters") + public void writeLargeChunk(boolean incrementalChunkList) + throws IOException { + init(incrementalChunkList); + + int size = 1024 * 1024 + 1; + ByteBuffer byteBuffer = ByteBuffer.allocate(size); + + try (OzoneOutputStream out = bucket.createKey(keyName, size, + ReplicationConfig.getDefault(config), new HashMap<>())) { + for (int i = 0; i < 4; i++) { + out.write(byteBuffer); + out.hsync(); + } + } + + try (OzoneInputStream is = bucket.readKey(keyName)) { + ByteBuffer readBuffer = ByteBuffer.allocate(size); + for (int i = 0; i < 4; i++) { + is.read(readBuffer); + assertArrayEquals(readBuffer.array(), byteBuffer.array()); + } + } + } +} diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java index 42b9d807671..09a6c0a5c0e 100644 --- 
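
Editor's note on the two hunks above: MockOmTransport now keeps the open-key entry when a CommitKeyRequest carries the hsync flag and only removes it on a plain commit or recovery, which is what allows the new TestBlockOutputStreamIncrementalPutBlock to call hsync() repeatedly before the stream is closed. A minimal usage fragment of that client-side pattern, assuming the same imports and the `bucket`/`config` fields initialized exactly as in the new test class; the key name and payload are illustrative only:

```java
// Illustrative fragment, not part of the patch: repeated hsync() calls flush
// data while the key stays open on the (mock) OM; close() performs the commit.
byte[] chunk = "payload".getBytes(StandardCharsets.UTF_8);
try (OzoneOutputStream out = bucket.createKey("hsync-demo", chunk.length,
    ReplicationConfig.getDefault(config), new HashMap<>())) {
  for (int i = 0; i < 3; i++) {
    out.write(chunk);
    out.hsync();   // "hsync" path: the open-key entry is kept
  }
}                  // close(): "commit" path, the open-key entry is removed
```
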
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.protocolPB.OmTransport; import org.apache.ozone.test.LambdaTestUtils.VoidCallable; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -47,7 +47,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.junit.jupiter.api.Assertions.fail; +import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -67,12 +67,8 @@ public static void expectOmException( OMException.ResultCodes code, VoidCallable eval) throws Exception { - try { - eval.call(); - fail("OMException is expected"); - } catch (OMException ex) { - assertEquals(code, ex.getResult()); - } + OMException ex = assertThrows(OMException.class, () -> eval.call()); + assertEquals(code, ex.getResult()); } @BeforeEach @@ -90,7 +86,7 @@ protected OmTransport createOmTransport(String omServiceId) { return new MockOmTransport(blkAllocator); } - @NotNull + @Nonnull @Override protected XceiverClientFactory createXceiverClientFactory( ServiceInfoEx serviceInfo) { @@ -138,7 +134,7 @@ public void testCreateVolumeWithMetadata() @Test public void testCreateBucket() throws IOException { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -152,7 +148,7 @@ public void testCreateBucket() @Test public void testPutKeyRatisOneNode() throws IOException { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String value = "sample value"; OzoneBucket bucket = getOzoneBucket(); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index a2287ecc524..25a3ad2d9c8 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -66,7 +66,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -706,20 +705,15 @@ public void testStripeWriteRetriesOn4FailuresWith3RetriesAllowed() nodesIndexesToMarkFailure[2] = 10; //To mark node failed in fourth block group. nodesIndexesToMarkFailure[3] = 15; - try { - // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation can - // pick good block group, but client retries should be limited - // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was - // configured as 3). So, it should fail as we have marked 3 nodes as bad. 
- testStripeWriteRetriesOnFailures(con, 20, nodesIndexesToMarkFailure); - fail( - "Expecting it to fail as retries should exceed the max allowed times:" - + " " + 3); - } catch (IOException e) { - assertEquals( - "Completed max allowed retries 3 on stripe failures.", - e.getMessage()); - } + // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation can + // pick good block group, but client retries should be limited + // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was + // configured as 3). So, it should fail as we have marked 3 nodes as bad. + IOException e = assertThrows(IOException.class, + () -> testStripeWriteRetriesOnFailures(con, 20, nodesIndexesToMarkFailure)); + assertEquals( + "Completed max allowed retries 3 on stripe failures.", + e.getMessage()); } public void testStripeWriteRetriesOnFailures(OzoneConfiguration con, @@ -1035,7 +1029,7 @@ public void testPartialStripeWithPartialChunkRetry() } @Test - public void testDiscardPreAllocatedBlocksPreventRetryExceeds() + void testDiscardPreAllocatedBlocksPreventRetryExceeds() throws Exception { close(); OzoneConfiguration con = createConfiguration(); @@ -1105,16 +1099,10 @@ public void testDiscardPreAllocatedBlocksPreventRetryExceeds() factoryStub.setFailedStorages(failedDNs); // Writes that will retry due to failed DNs - try { - for (int j = 0; j < numStripesAfterFailure; j++) { - for (int i = 0; i < dataBlocks; i++) { - out.write(inputChunks[i]); - } + for (int j = 0; j < numStripesAfterFailure; j++) { + for (int i = 0; i < dataBlocks; i++) { + out.write(inputChunks[i]); } - } catch (IOException e) { - // If we don't discard pre-allocated blocks, - // retries should exceed the maxRetries and write will fail. - fail("Max retries exceeded"); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java index 5cf4401bae2..6162f1ae5a4 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.client.checksum; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.DataChecksum; @@ -27,7 +28,6 @@ import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; -import java.util.Random; import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC; import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; @@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer { @Test public void testComputeMd5Crc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = new byte[lenOfBytes]; - Random r = new Random(); - r.nextBytes(randomChunkChecksum); + byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); + MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum); byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest(); AbstractBlockChecksumComputer computer = @@ -56,9 +55,7 @@ public void testComputeMd5Crc() throws IOException { @Test public void testComputeCompositeCrc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = new 
byte[lenOfBytes]; - Random r = new Random(); - r.nextBytes(randomChunkChecksum); + byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); CrcComposer crcComposer = CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java index fa80f72b7f3..702a450ee75 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java @@ -52,7 +52,7 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -72,9 +72,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.when; import static org.mockito.Mockito.mock; -import static org.mockito.ArgumentMatchers.any; /** * Unit tests for ReplicatedFileChecksumHelper class. @@ -101,7 +101,7 @@ protected OmTransport createOmTransport( return new MockOmTransport(); } - @NotNull + @Nonnull @Override protected XceiverClientFactory createXceiverClientFactory( ServiceInfoEx serviceInfo) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java index abf3e9c1323..6af5c4b4e0d 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java @@ -38,8 +38,8 @@ import java.util.Map; import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java index ea70f19fdfe..8d9efd96325 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java @@ -25,7 +25,7 @@ import java.io.IOException; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test class for {@link OzoneKMSUtil}. 
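
Editor's note: several test diffs in this patch (TestOzoneClient, TestOzoneECClient, TestOzoneKMSUtil here, and the lock and transport tests further down) replace the try { ...; fail(); } catch idiom with JUnit 5's assertThrows. A small self-contained sketch of the two equivalent forms; the class and method names (AssertThrowsPattern, doRiskyCall) are placeholders, not code from the patch:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;

class AssertThrowsPattern {
  // Old idiom: call, fail() if nothing is thrown, assert on the caught exception.
  void oldStyle() {
    try {
      doRiskyCall();
      fail("IllegalStateException was expected");
    } catch (IllegalStateException e) {
      assertEquals("boom", e.getMessage());
    }
  }

  // New idiom: assertThrows returns the exception, so the assertion reads linearly.
  void newStyle() {
    IllegalStateException e =
        assertThrows(IllegalStateException.class, this::doRiskyCall);
    assertEquals("boom", e.getMessage());
  }

  private void doRiskyCall() {
    throw new IllegalStateException("boom");
  }
}
```
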
@@ -41,12 +41,8 @@ public void setUp() { @Test public void getKeyProvider() { - try { - OzoneKMSUtil.getKeyProvider(config, null); - fail("Expected IOException."); - } catch (IOException ioe) { - assertEquals(ioe.getMessage(), "KMS serverProviderUri is " + - "not configured."); - } + IOException ioe = + assertThrows(IOException.class, () -> OzoneKMSUtil.getKeyProvider(config, null)); + assertEquals(ioe.getMessage(), "KMS serverProviderUri is " + "not configured."); } } diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 813edcb7d71..4af3fb18523 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index 6f2ad0bfa88..8ffa3c45c09 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -20,7 +20,7 @@ import org.apache.hadoop.fs.Syncable; import org.apache.ratis.util.function.CheckedFunction; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.io.OutputStream; import java.util.Objects; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java index 4f1f66faccb..122b04b715d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java @@ -44,13 +44,7 @@ public void put(String id, S3SecretValue secretValue) { @Override public void invalidate(String id) { - S3SecretValue secret = cache.getIfPresent(id); - if (secret == null) { - return; - } - secret.setDeleted(true); - secret.setAwsSecret(null); - cache.put(id, secret); + cache.asMap().computeIfPresent(id, (k, secret) -> secret.deleted()); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index f8c752aab27..e382377dff4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; /** * A class that encapsulates Bucket Arguments. @@ -50,6 +51,10 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { */ private StorageType storageType; + /** + * Bucket encryption key info if encryption is enabled. + */ + private BucketEncryptionKeyInfo bekInfo; private long quotaInBytes = OzoneConsts.QUOTA_RESET; private long quotaInNamespace = OzoneConsts.QUOTA_RESET; private boolean quotaInBytesSet = false; @@ -150,6 +155,10 @@ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; } + public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { + return bekInfo; + } + /** * Sets the Bucket default replication config. 
*/ @@ -168,6 +177,12 @@ private void setQuotaInNamespace(long quotaInNamespace) { this.quotaInNamespace = quotaInNamespace; } + @Deprecated + private void setBucketEncryptionKey( + BucketEncryptionKeyInfo bucketEncryptionKey) { + this.bekInfo = bucketEncryptionKey; + } + /** * Returns Bucket Owner Name. * @@ -216,6 +231,7 @@ public static class Builder { private long quotaInBytes; private boolean quotaInNamespaceSet = false; private long quotaInNamespace; + private BucketEncryptionKeyInfo bekInfo; private DefaultReplicationConfig defaultReplicationConfig; private String ownerName; /** @@ -241,6 +257,12 @@ public Builder setIsVersionEnabled(Boolean versionFlag) { return this; } + @Deprecated + public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { + this.bekInfo = info; + return this; + } + public Builder addMetadata(Map metadataMap) { this.metadata = metadataMap; return this; @@ -291,6 +313,9 @@ public OmBucketArgs build() { if (quotaInNamespaceSet) { omBucketArgs.setQuotaInNamespace(quotaInNamespace); } + if (bekInfo != null && bekInfo.getKeyName() != null) { + omBucketArgs.setBucketEncryptionKey(bekInfo); + } return omBucketArgs; } } @@ -322,6 +347,11 @@ public BucketArgs getProtobuf() { if (ownerName != null) { builder.setOwnerName(ownerName); } + + if (bekInfo != null && bekInfo.getKeyName() != null) { + builder.setBekInfo(OMPBHelper.convert(bekInfo)); + } + return builder.build(); } @@ -355,6 +385,11 @@ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { if (bucketArgs.hasQuotaInNamespace()) { omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); } + + if (bucketArgs.hasBekInfo()) { + omBucketArgs.setBucketEncryptionKey( + OMPBHelper.convert(bucketArgs.getBekInfo())); + } return omBucketArgs; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 8e43d057729..453dc3b957c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.audit.Auditable; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.GDPRSymmetricKey; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.util.ArrayList; import java.util.HashMap; @@ -196,7 +196,7 @@ public OmKeyArgs.Builder toBuilder() { .setForceUpdateContainerCacheFromSCM(forceUpdateContainerCacheFromSCM); } - @NotNull + @Nonnull public KeyArgs toProtobuf() { return KeyArgs.newBuilder() .setVolumeName(getVolumeName()) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 624e479ce3d..74effbd80a3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -21,7 +21,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.nio.file.Paths; import java.util.UUID; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java 
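
Editor's note: the RpcClient hunk at the start of this section builds an OmBucketArgs with the bucket encryption key and submits it through setBucketProperty, and the OmBucketArgs hunks above carry that bekInfo through the protobuf conversion (the setter is marked @Deprecated in the hunk). A minimal caller-side sketch under those assumptions; it presumes an already-initialized OzoneManagerProtocol client and an existing BucketEncryptionKeyInfo, and the volume/bucket names are illustrative:

```java
// Editor's sketch: propagate a bucket encryption key via OmBucketArgs.
// Assumes `ozoneManagerClient` (OzoneManagerProtocol) and `bek`
// (BucketEncryptionKeyInfo) are already available; names are illustrative.
OmBucketArgs args = new OmBucketArgs.Builder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setBucketEncryptionKey(bek)   // only applied when bek has a non-null key name
    .build();

ozoneManagerClient.setBucketProperty(args);   // OM persists the updated bucket property
```
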
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index e97adc0a50f..cb1ed0976a0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -27,7 +27,7 @@ /** * S3Secret to be saved in database. */ -public class S3SecretValue { +public final class S3SecretValue { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(S3Secret.getDefaultInstance()), S3SecretValue::fromProtobuf, @@ -38,16 +38,29 @@ public static Codec getCodec() { } // TODO: This field should be renamed to accessId for generalization. - private String kerberosID; - private String awsSecret; - private boolean isDeleted; - private long transactionLogIndex; + private final String kerberosID; + private final String awsSecret; + private final boolean isDeleted; + private final long transactionLogIndex; - public S3SecretValue(String kerberosID, String awsSecret) { - this(kerberosID, awsSecret, false, 0L); + public static S3SecretValue of(String kerberosID, String awsSecret) { + return of(kerberosID, awsSecret, 0); } - public S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted, + public static S3SecretValue of(String kerberosID, String awsSecret, long transactionLogIndex) { + return new S3SecretValue( + Objects.requireNonNull(kerberosID), + Objects.requireNonNull(awsSecret), + false, + transactionLogIndex + ); + } + + public S3SecretValue deleted() { + return new S3SecretValue(kerberosID, "", true, transactionLogIndex); + } + + private S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted, long transactionLogIndex) { this.kerberosID = kerberosID; this.awsSecret = awsSecret; @@ -59,26 +72,14 @@ public String getKerberosID() { return kerberosID; } - public void setKerberosID(String kerberosID) { - this.kerberosID = kerberosID; - } - public String getAwsSecret() { return awsSecret; } - public void setAwsSecret(String awsSecret) { - this.awsSecret = awsSecret; - } - public boolean isDeleted() { return isDeleted; } - public void setDeleted(boolean status) { - this.isDeleted = status; - } - public String getAwsAccessKey() { return kerberosID; } @@ -87,12 +88,8 @@ public long getTransactionLogIndex() { return transactionLogIndex; } - public void setTransactionLogIndex(long transactionLogIndex) { - this.transactionLogIndex = transactionLogIndex; - } - public static S3SecretValue fromProtobuf(S3Secret s3Secret) { - return new S3SecretValue(s3Secret.getKerberosID(), s3Secret.getAwsSecret()); + return S3SecretValue.of(s3Secret.getKerberosID(), s3Secret.getAwsSecret()); } public S3Secret getProtobuf() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java deleted file mode 100644 index ee64d5ae092..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
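
Editor's note: the S3SecretValue hunk above makes the class immutable: the public constructor and setters are removed in favor of the of(...) factories and a deleted() copy, which is what lets S3InMemoryCache.invalidate (earlier in this patch) switch to cache.asMap().computeIfPresent. A short sketch of how call sites adapt, using only the methods introduced in that hunk; the access id, secret, and transaction index values are illustrative:

```java
// Editor's sketch of the new immutable S3SecretValue API from this patch.
S3SecretValue secret = S3SecretValue.of("accessId", "awsSecret");       // was: new S3SecretValue(...)
S3SecretValue withTx = S3SecretValue.of("accessId", "awsSecret", 42L);  // carries a transaction log index

// Soft delete now returns a new instance instead of mutating in place:
S3SecretValue tombstone = secret.deleted();
assert tombstone.isDeleted() && tombstone.getAwsSecret().isEmpty();
```
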
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.multitenant; - -import java.io.IOException; -import java.security.Principal; -import java.util.HashSet; -import java.util.List; - -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import com.google.gson.JsonObject; - -/** - * AccessPolicy interface for Ozone Multi-Tenancy. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"}) -@InterfaceStability.Evolving -public interface AccessPolicy { - - /** - * Ozone could support different authorization engines e.g. - * native-authorization, Ranger Authorization, - * any-other-external-authorization. This interface is an in memory - * version of a generic access policy. Any Ozone policy can be uniquely - * identified by its policy-ID. Ozone can choose to persist this policy-ID - * in its internal database. A remote/native authorizer can retrieve/update - * an access policy associated with its Policy-ID ID. - * - */ - enum AccessPolicyType { NATIVE_ACL, RANGER_POLICY, AWS_POLICY, OTHER }; - - /** - * Allow or deny. - */ - enum AccessGrantType { ALLOW, DENY }; - - /** - * Defines an access policy entry. - */ - class AccessPolicyElem { - private OzoneObj object; - private Principal principal; - private ACLType aclType; - private AccessGrantType grantType; - - public AccessPolicyElem(OzoneObj obj, Principal id, - ACLType acl, AccessGrantType grant) { - object = obj; - principal = id; - aclType = acl; - grantType = grant; - } - - public OzoneObj getObject() { - return object; - } - - public Principal getPrincipal() { - return principal; - } - - public ACLType getAclType() { - return aclType; - } - - public AccessGrantType getAccessGrantType() { - return grantType; - } - } - - /** - * @param id This would be policy-ID that an external/native authorizer - * could return. - */ - void setPolicyName(String id); - - String getPolicyID(); - - /** - * @return unique policy-name for this policy. - */ - String getPolicyName(); - - /** - * - * @return Policy in a Json string format. Individual implementation can - * choose different AccessPolicyType e.g. Ranger-Compatible-Json-Policy, - * AWS-Compatible-Json-policy etc. It could be an Opaque data to the caller - * and they can directly send it to an authorizer (e.g. Ranger). - * All Authorizer policy engines are supposed to provide an implementation - * of AccessPolicy interface. - */ - String serializePolicyToJsonString() throws IOException; - - /** - * Given a serialized accessPolicy in a Json format, deserializes and - * constructs a valid access Policy. - * @return - * @throws IOException - */ - String deserializePolicyFromJsonString(JsonObject jsonObject) - throws IOException; - - /** - * @return AccessPolicyType (Native or otherwise). 
- */ - AccessPolicyType getAccessPolicyType(); - - void addAccessPolicyElem(OzoneObj object, - Principal principal, ACLType acl, - AccessGrantType grant) throws IOException; - - void removeAccessPolicyElem(OzoneObj object, - Principal principal, - ACLType acl, AccessGrantType grant) - throws IOException; - - List getAccessPolicyElem(); - - /** - * Sets the last update time to mtime. - * @param mtime Time in epoch milliseconds - */ - void setPolicyLastUpdateTime(long mtime); - - /** - * Returns the last update time of Ranger policies. - */ - long getPolicyLastUpdateTime(); - - /** - * @return list of roles associated with this policy - */ - HashSet getRoleList(); -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java deleted file mode 100644 index cebb540ba6d..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.multitenant; - -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneObj; - -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; - -import java.io.IOException; -import java.security.Principal; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_TENANT_RANGER_POLICY_LABEL; -import static org.apache.hadoop.ozone.om.multitenant.AccessPolicy.AccessPolicyType.RANGER_POLICY; - -/** - * This is used for Ozone tenant access policy control. 
- */ -public class RangerAccessPolicy implements AccessPolicy { - - // For now RangerAccessPolicy supports only one object per policy - private OzoneObj accessObject; - private final Map> policyMap; - private final HashSet roleList; - private String policyID; - private String policyJsonString; - private String policyName; - private long lastPolicyUpdateTimeEpochMillis; - - public RangerAccessPolicy(String name) { - policyMap = new ConcurrentHashMap<>(); - policyName = name; - roleList = new HashSet<>(); - } - - public void setPolicyName(String id) { - policyID = id; - } - - public String getPolicyID() { - return policyID; - } - - public String getPolicyName() { - return policyName; - } - - public HashSet getRoleList() { - return roleList; - } - - @Override - public void setPolicyLastUpdateTime(long mtime) { - lastPolicyUpdateTimeEpochMillis = mtime; - } - - @Override - public long getPolicyLastUpdateTime() { - return lastPolicyUpdateTimeEpochMillis; - } - - @Override - public String serializePolicyToJsonString() throws IOException { - updatePolicyJsonString(); - return policyJsonString; - } - - @Override - public String deserializePolicyFromJsonString(JsonObject jsonObject) { - setPolicyName(jsonObject.get("id").getAsString()); - try { - JsonArray policyItems = jsonObject - .getAsJsonArray("policyItems"); - for (int j = 0; j < policyItems.size(); ++j) { - JsonObject policy = policyItems.get(j).getAsJsonObject(); - JsonArray roles = policy.getAsJsonArray("roles"); - for (int k = 0; k < roles.size(); ++k) { - if (!roleList.contains(roles.get(k).getAsString())) { - // We only get the role name here. We need to query and populate it. - roleList.add(roles.get(k).getAsString()); - } - } - } - } catch (Exception e) { - // Ignore Exception here. - } - // TODO : retrieve other policy fields as well. - try { - setPolicyLastUpdateTime(jsonObject.get("updateTime").getAsLong()); - } catch (Exception e) { - // lets ignore the exception in case the field is not set. 
- } - return null; - } - - @Override - public AccessPolicyType getAccessPolicyType() { - return RANGER_POLICY; - } - - @Override - public void addAccessPolicyElem(OzoneObj object, - Principal principal, - ACLType acl, AccessGrantType grant) - throws IOException { - if (accessObject == null) { - accessObject = object; - } else if (!object.toString().equals(accessObject.toString())) { - throw new IOException( - "RangerAccessPolicy supports only one object per" + " policy"); - } - AccessPolicyElem elem = new AccessPolicyElem(object, principal, acl, grant); - if (!policyMap.containsKey(principal.getName())) { - List elemList = new ArrayList<>(); - elemList.add(elem); - policyMap.put(principal.getName(), elemList); - return; - } - List elemList = policyMap.get(principal.getName()); - for (AccessPolicyElem e : elemList) { - if (e.getAclType() == acl) { - throw new IOException( - "RangerAccessPolicy: Principal " + principal.getName() - + " already exists with access " + acl); - } - } - elemList.add(elem); - } - - @Override - public List getAccessPolicyElem() { - List list = new ArrayList<>(); - for (Map.Entry> entry : policyMap - .entrySet()) { - list.addAll(entry.getValue()); - } - return list; - } - - @Override - public void removeAccessPolicyElem(OzoneObj object, - Principal principal, ACLType acl, - AccessGrantType grant) - throws IOException { - if (accessObject == null) { - throw new IOException("removeAccessPolicyElem: Invalid Arguments."); - } else if (!object.toString().equals(accessObject.toString())) { - throw new IOException( - "removeAccessPolicyElem: Object not found." + object.toString()); - } - if (!policyMap.containsKey(principal.getName())) { - throw new IOException( - "removeAccessPolicyElem: Principal not found." + object.toString()); - } - List elemList = policyMap.get(principal.getName()); - for (AccessPolicyElem e : elemList) { - if (e.getAclType() == acl) { - elemList.remove(e); - } - } - if (elemList.isEmpty()) { - policyMap.remove(principal.toString()); - } - throw new IOException( - "removeAccessPolicyElem: aclType not found." 
+ object.toString()); - } - - private String createRangerResourceItems() { - StringBuilder resourceItems = new StringBuilder(); - resourceItems.append("\"resources\":{" + - "\"volume\":{" + - "\"values\":[\""); - resourceItems.append(accessObject.getVolumeName()); - resourceItems.append("\"]," + - "\"isRecursive\":false," + - "\"isExcludes\":false" + - "}"); - if ((accessObject.getResourceType() == OzoneObj.ResourceType.BUCKET) || - (accessObject.getResourceType() == OzoneObj.ResourceType.KEY)) { - resourceItems.append( - ",\"bucket\":{" + - "\"values\":[\""); - resourceItems.append(accessObject.getBucketName()); - resourceItems.append("\"]," + - "\"isRecursive\":false," + - "\"isExcludes\":false" + - "}"); - } - if (accessObject.getResourceType() == OzoneObj.ResourceType.KEY) { - resourceItems.append(",\"key\":{" + - "\"values\":[\""); - resourceItems.append(accessObject.getKeyName()); - resourceItems.append("\"]," + - "\"isRecursive\":true," + - "\"isExcludes\":false" + - "}"); - } - resourceItems.append("},"); - return resourceItems.toString(); - } - - private String createRangerPolicyItems() throws IOException { - StringBuilder policyItems = new StringBuilder(); - policyItems.append("\"policyItems\":["); - int mapRemainingSize = policyMap.size(); - for (Map.Entry> mapElem : policyMap - .entrySet()) { - mapRemainingSize--; - List list = mapElem.getValue(); - if (list.isEmpty()) { - continue; - } - policyItems.append("{"); - if (list.get(0).getPrincipal() instanceof OzoneTenantRolePrincipal) { - policyItems.append("\"roles\":[\"" + mapElem.getKey() + "\"],"); - } else { - policyItems.append("\"users\":[\"" + mapElem.getKey() + "\"],"); - } - policyItems.append("\"accesses\":["); - Iterator iter = list.iterator(); - while (iter.hasNext()) { - AccessPolicyElem elem = iter.next(); - policyItems.append("{"); - policyItems.append("\"type\":\""); - policyItems.append(getRangerAclString(elem.getAclType())); - policyItems.append("\","); - if (elem.getAccessGrantType() == AccessGrantType.ALLOW) { - policyItems.append("\"isAllowed\":true"); - } else { - policyItems.append("\"isDenied\":true"); - } - policyItems.append("}"); - if (iter.hasNext()) { - policyItems.append(","); - } - } - policyItems.append("]"); - policyItems.append("}"); - if (mapRemainingSize > 0) { - policyItems.append(","); - } - } - policyItems.append("],"); - return policyItems.toString(); - } - - private String getRangerAclString(ACLType aclType) throws IOException { - switch (aclType) { - case ALL: - return "All"; - case LIST: - return "List"; - case READ: - return "Read"; - case WRITE: - return "Write"; - case CREATE: - return "Create"; - case DELETE: - return "Delete"; - case READ_ACL: - return "Read_ACL"; - case WRITE_ACL: - return "Write_ACL"; - case NONE: - return ""; - default: - throw new IOException("Unknown ACLType"); - } - } - - private void updatePolicyJsonString() throws IOException { - policyJsonString = - "{\"policyType\":\"0\"," + "\"name\":\"" + policyName + "\"," - + "\"isEnabled\":true," + "\"policyPriority\":0," - + "\"description\":\"Policy created by Ozone for Multi-Tenancy\"," - + "\"policyLabels\":[\"" + OZONE_TENANT_RANGER_POLICY_LABEL + "\"]," - + "\"description\":\"\"," - + "\"isAuditEnabled\":true," + createRangerResourceItems() - + "\"isDenyAllElse\":false," + createRangerPolicyItems() - + "\"allowExceptions\":[]," + "\"denyPolicyItems\":[]," - + "\"denyExceptions\":[]," + "\"service\":\"cm_ozone\"" + "}"; - } - - @Override - public String toString() { - return "RangerAccessPolicy{" + 
"accessObject=" + accessObject - + ", policyMap=" + policyMap + ", roleList=" + roleList + ", policyID='" - + policyID + '\'' + ", policyJsonString='" + policyJsonString + '\'' - + ", policyName='" + policyName + '\'' - + ", lastPolicyUpdateTimeEpochMillis=" + lastPolicyUpdateTimeEpochMillis - + '}'; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index e769e3035ef..f41f89b181d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -23,8 +23,7 @@ import java.util.List; import java.util.UUID; -import javax.annotation.Nonnull; - +import jakarta.annotation.Nonnull; import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneAcl; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 7b8d7ef9b2b..bd40dfcf024 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -25,7 +25,7 @@ import java.util.UUID; import java.util.stream.Collectors; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.annotation.InterfaceAudience; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index a815b72deca..08ae1fbc65b 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -36,7 +36,6 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE_ACL; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -122,14 +121,7 @@ void testAclParse() { if (entry.getValue()) { OzoneAcl.parseAcl(entry.getKey()); } else { - try { - OzoneAcl.parseAcl(entry.getKey()); - // should never get here since parseAcl will throw - fail("An exception was expected but did not happen. 
Key: " + - entry.getKey()); - } catch (IllegalArgumentException e) { - // nothing to do - } + assertThrows(IllegalArgumentException.class, () -> OzoneAcl.parseAcl(entry.getKey())); } } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java index 75adb7e6a11..4a814852067 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java @@ -30,7 +30,7 @@ import java.util.concurrent.CountDownLatch; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -239,14 +239,11 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { higherResourceName = new String[]{volumeName, bucketName}; lock.acquireWriteLock(resource, resourceName); - try { - lock.acquireWriteLock(higherResource, higherResourceName); - fail("testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + higherResource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = + assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(higherResource, higherResourceName)); + String message = "cannot acquire " + higherResource.getName() + " lock " + + "while holding [" + resource.getName() + "] lock(s)."; + assertThat(ex).hasMessageContaining(message); } @Test @@ -264,14 +261,11 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { higherResourceName = new String[]{volumeName, bucketName}; lock.acquireReadLock(resource, resourceName); - try { - lock.acquireWriteLock(higherResource, higherResourceName); - fail("testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + higherResource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = + assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(higherResource, higherResourceName)); + String message = "cannot acquire " + higherResource.getName() + " lock " + + "while holding [" + resource.getName() + "] lock(s)."; + assertThat(ex).hasMessageContaining(message); } @Test @@ -289,14 +283,11 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { higherResourceName = new String[]{volumeName, bucketName}; lock.acquireReadLock(resource, resourceName); - try { - lock.acquireReadLock(higherResource, higherResourceName); - fail("testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + higherResource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = + assertThrows(RuntimeException.class, () -> lock.acquireReadLock(higherResource, higherResourceName)); + String message = "cannot acquire " + higherResource.getName() + " lock " + + "while holding [" + resource.getName() + "] lock(s)."; + assertThat(ex).hasMessageContaining(message); } @Test @@ -314,13 +305,10 @@ void 
testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() { higherResourceName = new String[]{volumeName, bucketName}; lock.acquireWriteLock(resource, resourceName); - try { - lock.acquireReadLock(higherResource, higherResourceName); - fail("testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + higherResource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = + assertThrows(RuntimeException.class, () -> lock.acquireReadLock(higherResource, higherResourceName)); + String message = "cannot acquire " + higherResource.getName() + " lock " + + "while holding [" + resource.getName() + "] lock(s)."; + assertThat(ex).hasMessageContaining(message); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 856f2b238c0..54ab718ccf9 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -40,7 +40,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** * Class tests OzoneManagerLock. @@ -77,14 +76,11 @@ private void testResourceReacquireLock(String[] resourceName, resource == Resource.S3_SECRET_LOCK || resource == Resource.PREFIX_LOCK) { lock.acquireWriteLock(resource, resourceName); - try { - lock.acquireWriteLock(resource, resourceName); - fail("reacquireResourceLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + resource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = + assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(resource, resourceName)); + String message = "cannot acquire " + resource.getName() + " lock " + + "while holding [" + resource.getName() + "] lock(s)."; + assertThat(ex).hasMessageContaining(message); assertDoesNotThrow(() -> lock.releaseWriteLock(resource, resourceName)); } else { lock.acquireWriteLock(resource, resourceName); @@ -162,15 +158,13 @@ void testLockViolations() { stack.push(new ResourceInfo(resourceName, higherResource)); currentLocks.add(higherResource.getName()); // try to acquire lower level lock - try { - resourceName = generateResourceName(resource); - lock.acquireWriteLock(resource, resourceName); - fail("testLockViolations failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + resource.getName() + " lock " + - "while holding " + currentLocks + " lock(s)."; - assertThat(ex).hasMessageContaining(message); - } + RuntimeException ex = assertThrows(RuntimeException.class, () -> { + String[] resourceName1 = generateResourceName(resource); + lock.acquireWriteLock(resource, resourceName1); + }); + String message = "cannot acquire " + resource.getName() + " lock " + + "while holding " + currentLocks + " lock(s)."; + assertThat(ex).hasMessageContaining(message); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java index 04bb4b240dd..3d73a42e694 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.ClientVersion.CURRENT_VERSION; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.AdditionalAnswers.delegatesTo; import static org.mockito.Mockito.mock; @@ -46,8 +47,6 @@ import com.google.protobuf.ServiceException; import org.apache.ratis.protocol.RaftPeerId; -import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.apache.hadoop.ozone.om.OMConfigKeys .OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH; @@ -61,13 +60,13 @@ public class TestS3GrpcOmTransport { private static final Logger LOG = LoggerFactory.getLogger(TestS3GrpcOmTransport.class); - private final String leaderOMNodeId = "TestOM"; + private static final String LEADER_OM_NODE_ID = "TestOM"; private final OMResponse omResponse = OMResponse.newBuilder() .setSuccess(true) .setStatus(org.apache.hadoop.ozone.protocol .proto.OzoneManagerProtocolProtos.Status.OK) - .setLeaderOMNodeId(leaderOMNodeId) + .setLeaderOMNodeId(LEADER_OM_NODE_ID) .setCmdType(Type.AllocateBlock) .build(); @@ -168,7 +167,7 @@ public void testSubmitRequestToServer() throws Exception { final OMResponse resp = client.submitRequest(omRequest); assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol .proto.OzoneManagerProtocolProtos.Status.OK); - assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId); + assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID); } @Test @@ -192,7 +191,7 @@ public void testGrpcFailoverProxy() throws Exception { final OMResponse resp = client.submitRequest(omRequest); assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol .proto.OzoneManagerProtocolProtos.Status.OK); - assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId); + assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID); } @Test @@ -216,12 +215,7 @@ public void testGrpcFailoverProxyExhaustRetry() throws Exception { // OMFailoverProvider returns Fail retry due to #attempts > // max failovers - try { - final OMResponse resp = client.submitRequest(omRequest); - fail(); - } catch (Exception e) { - assertTrue(true); - } + assertThrows(Exception.class, () -> client.submitRequest(omRequest)); } @Test @@ -251,11 +245,6 @@ public void testGrpcFailoverExceedMaxMesgLen() throws Exception { // len > 0, causing RESOURCE_EXHAUSTED exception. // This exception should cause failover to NOT retry, // rather to fail. 
- try { - final OMResponse resp = client.submitRequest(omRequest); - fail(); - } catch (Exception e) { - assertTrue(true); - } + assertThrows(Exception.class, () -> client.submitRequest(omRequest)); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java index f6909e410f7..6d9f70b0f4f 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java @@ -28,7 +28,7 @@ import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java index 6a3cdf91d8a..e441a6d3820 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java @@ -28,7 +28,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.MockedStatic; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.mockStatic; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java index 0b69d0dd9b3..1ab01ee3e00 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java @@ -22,8 +22,8 @@ import java.security.SecureRandom; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Tests GDPRSymmetricKey structure. 
@@ -56,13 +56,8 @@ public void testKeyGenerationWithValidInput() throws Exception { @Test public void testKeyGenerationWithInvalidInput() throws Exception { - try { - new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), - OzoneConsts.GDPR_ALGORITHM_NAME); - fail("Expect length mismatched"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage() - .equalsIgnoreCase("Secret must be exactly 16 characters")); - } + IllegalArgumentException e = assertThrows(IllegalArgumentException.class, + () -> new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), OzoneConsts.GDPR_ALGORITHM_NAME)); + assertTrue(e.getMessage().equalsIgnoreCase("Secret must be exactly 16 characters")); } } diff --git a/hadoop-ozone/csi/src/main/resources/proto.lock b/hadoop-ozone/csi/src/main/resources/proto.lock index 8f797b69db4..410598cbb66 100644 --- a/hadoop-ozone/csi/src/main/resources/proto.lock +++ b/hadoop-ozone/csi/src/main/resources/proto.lock @@ -204,12 +204,14 @@ { "id": 1, "name": "service", - "type": "Service" + "type": "Service", + "oneof_parent": "type" }, { "id": 2, "name": "volume_expansion", - "type": "VolumeExpansion" + "type": "VolumeExpansion", + "oneof_parent": "type" } ], "messages": [ @@ -302,12 +304,14 @@ { "id": 1, "name": "snapshot", - "type": "SnapshotSource" + "type": "SnapshotSource", + "oneof_parent": "type" }, { "id": 2, "name": "volume", - "type": "VolumeSource" + "type": "VolumeSource", + "oneof_parent": "type" } ], "messages": [ @@ -349,12 +353,14 @@ { "id": 1, "name": "block", - "type": "BlockVolume" + "type": "BlockVolume", + "oneof_parent": "access_type" }, { "id": 2, "name": "mount", - "type": "MountVolume" + "type": "MountVolume", + "oneof_parent": "access_type" }, { "id": 3, @@ -793,7 +799,8 @@ { "id": 1, "name": "rpc", - "type": "RPC" + "type": "RPC", + "oneof_parent": "type" } ], "messages": [ @@ -1243,7 +1250,8 @@ { "id": 1, "name": "rpc", - "type": "RPC" + "type": "RPC", + "oneof_parent": "type" } ], "messages": [ diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index 417ae35e5e4..768a1f32a38 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -79,6 +79,12 @@ for i in $(seq 1 ${ITERATIONS}); do fi if [[ ${ITERATIONS} -gt 1 ]]; then + if ! grep -q "Tests run: [^0]" "${REPORT_DIR}/output.log"; then + echo "No tests were run" >> "${REPORT_DIR}/summary.txt" + irc=1 + FAIL_FAST=true + fi + REPORT_DIR="${original_report_dir}" echo "Iteration ${i} exit code: ${irc}" | tee -a "${REPORT_FILE}" fi diff --git a/hadoop-ozone/dev-support/checks/license.exceptions b/hadoop-ozone/dev-support/checks/license.exceptions index 66f17fb670d..5b22b88c492 100644 --- a/hadoop-ozone/dev-support/checks/license.exceptions +++ b/hadoop-ozone/dev-support/checks/license.exceptions @@ -16,7 +16,7 @@ # This file lists dependencies with acceptable license that # license-maven-plugin cannot find, or finds with unexpected license. 
-com.google.re2j:re2j:1.1 BSD 3-Clause +com.google.re2j:re2j:1.7 BSD 3-Clause javax.servlet:servlet-api:2.5 CDDL 1.1 javax.servlet.jsp:jsp-api:2.1 CDDL 1.1 org.codehaus.jettison:jettison:1.1 Apache License 2.0 diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index dc66f923a64..1eeca5c0f3d 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -19,6 +19,20 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native +zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) +if [[ -z "${zlib_version}" ]]; then + echo "ERROR zlib.version not defined in pom.xml" + exit 1 +fi + +bzip2_version=$(mvn -N help:evaluate -Dexpression=bzip2.version -q -DforceStdout) +if [[ -z "${bzip2_version}" ]]; then + echo "ERROR bzip2.version not defined in pom.xml" + exit 1 +fi + source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ + -Dbzip2.url="https://github.com/libarchive/bzip2/archive/refs/tags/bzip2-${bzip2_version}.tar.gz" \ + -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ -DexcludedGroups="unhealthy" \ "$@" diff --git a/hadoop-ozone/dev-support/checks/sonar.sh b/hadoop-ozone/dev-support/checks/sonar.sh index 9a36c70a663..27a971f691c 100755 --- a/hadoop-ozone/dev-support/checks/sonar.sh +++ b/hadoop-ozone/dev-support/checks/sonar.sh @@ -23,11 +23,8 @@ if [ ! "$SONAR_TOKEN" ]; then exit 1 fi -#Workaround: Sonar expects per-project Sonar XML report, but we have one, combined. Sonar seems to handle it well. -# Only the classes from the current project will be used. We can copy the same, combined report to all the subprojects. -if [ -f "$PROJECT_DIR/target/coverage/all.xml" ]; then - find "$PROJECT_DIR" -name pom.xml | grep -v target | xargs dirname | xargs -n1 -IDIR mkdir -p DIR/target/coverage/ - find "$PROJECT_DIR" -name pom.xml | grep -v target | xargs dirname | xargs -n1 -IDIR cp "$PROJECT_DIR/target/coverage/all.xml" DIR/target/coverage/ -fi -mvn -B verify -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone +mvn -V -B -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx --no-transfer-progress \ + -Dsonar.coverage.jacoco.xmlReportPaths="$(pwd)/target/coverage/all.xml" \ + -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone \ + verify org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh index a267080bb19..9d7ec5d4e60 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -35,8 +35,9 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir # This is the version of Ozone that should use the runner image to run the # code that was built. Other versions will pull images from docker hub. 
-export OZONE_CURRENT_VERSION=1.4.0 -run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" +export OZONE_CURRENT_VERSION=1.5.0 +run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" +# run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION" # run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION" diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index 15d4c7e427d..2057cdd8a99 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -45,6 +45,13 @@ services: volumes: - ../..:/opt/ozone command: ["sleep","1000000"] + old_client_1_4_0: + image: apache/ozone:1.4.0 + env_file: + - docker-config + volumes: + - ../..:/opt/ozone + command: ["sleep","1000000"] new_client: image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} env_file: diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh index baa239d56a8..419d397c19e 100755 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh @@ -21,8 +21,8 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR basename=$(basename ${COMPOSE_DIR}) -current_version=1.4.0 -old_versions="1.0.0 1.1.0 1.2.1 1.3.0" # container is needed for each version in clients.yaml +current_version=1.5.0 +old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh source "${COMPOSE_DIR}/../testlib.sh" @@ -77,7 +77,7 @@ test_cross_compatibility() { test_ec_cross_compatibility() { echo "Running Erasure Coded storage backward compatibility tests." 
- local cluster_versions_with_ec="1.3.0" + local cluster_versions_with_ec="1.3.0 1.4.0" local non_ec_client_versions="1.0.0 1.1.0 1.2.1" for cluster_version in ${cluster_versions_with_ec}; do diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index f5f6644efeb..e75cc7a9127 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -222,6 +222,26 @@ EPL 2.0 jakarta.ws.rs:jakarta.ws.rs-api org.aspectj:aspectjrt org.aspectj:aspectjweaver + org.glassfish.hk2.external:aopalliance-repackaged + org.glassfish.hk2.external:jakarta.inject + org.glassfish.hk2.external:javax.inject + org.glassfish.hk2:guice-bridge + org.glassfish.hk2:hk2-api + org.glassfish.hk2:hk2-locator + org.glassfish.hk2:hk2-utils + org.glassfish.hk2:osgi-resource-locator + org.glassfish.jersey.containers:jersey-container-servlet + org.glassfish.jersey.containers:jersey-container-servlet-core + org.glassfish.jersey.core:jersey-client + org.glassfish.jersey.core:jersey-common + org.glassfish.jersey.core:jersey-server + org.glassfish.jersey.ext.cdi:jersey-cdi1x + org.glassfish.jersey.ext:jersey-entity-filtering + org.glassfish.jersey.inject:jersey-hk2 + org.glassfish.jersey.media:jersey-media-jaxb + org.glassfish.jersey.media:jersey-media-json-jackson + org.jgrapht:jgrapht-core + org.jgrapht:jgrapht-ext CDDL 1.1 + GPLv2 with classpath exception @@ -239,26 +259,8 @@ CDDL 1.1 + GPLv2 with classpath exception javax.servlet:javax.servlet-api javax.servlet.jsp:jsp-api javax.ws.rs:jsr311-api - org.glassfish.hk2.external:aopalliance-repackaged - org.glassfish.hk2.external:jakarta.inject - org.glassfish.hk2.external:javax.inject - org.glassfish.hk2:guice-bridge - org.glassfish.hk2:hk2-api - org.glassfish.hk2:hk2-locator - org.glassfish.hk2:hk2-utils - org.glassfish.hk2:osgi-resource-locator org.glassfish.jaxb:jaxb-runtime org.glassfish.jaxb:txw2 - org.glassfish.jersey.containers:jersey-container-servlet - org.glassfish.jersey.containers:jersey-container-servlet-core - org.glassfish.jersey.core:jersey-client - org.glassfish.jersey.core:jersey-common - org.glassfish.jersey.core:jersey-server - org.glassfish.jersey.ext.cdi:jersey-cdi1x - org.glassfish.jersey.ext:jersey-entity-filtering - org.glassfish.jersey.inject:jersey-hk2 - org.glassfish.jersey.media:jersey-media-jaxb - org.glassfish.jersey.media:jersey-media-json-jackson Apache License 2.0 @@ -447,8 +449,9 @@ MIT com.bettercloud:vault-java-driver com.kstruct:gethostname4j - org.bouncycastle:bcpkix-jdk15on - org.bouncycastle:bcprov-jdk15on + org.bouncycastle:bcpkix-jdk18on + org.bouncycastle:bcprov-jdk18on + org.bouncycastle:bcutil-jdk18on org.checkerframework:checker-qual org.codehaus.mojo:animal-sniffer-annotations org.kohsuke.metainf-services:metainf-services @@ -456,24 +459,6 @@ MIT org.slf4j:slf4j-reload4j -EPL 2.0 -===================== - - jakarta.annotation:jakarta.annotation-api - jakarta.ws.rs:jakarta.ws.rs-api - org.jgrapht:jgrapht-core - org.jgrapht:jgrapht-ext - - -CDDL + GPLv2 with classpath exception -===================== - - javax.annotation:javax.annotation-api - javax.el:javax.el-api - javax.interceptor:javax.interceptor-api - javax.servlet:javax.servlet-api - - Public Domain ===================== diff --git a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt index dafb8905d0f..44492fd26f0 100644 --- a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt +++ 
b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt @@ -482,10 +482,10 @@ For additional credits (generally to people who reported problems) see CREDITS file. -org.bouncycastle:bcprov-jdk15on +org.bouncycastle:bcpkix-jdk18on ==================== -Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) +Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 2b582ddaf64..51e30862366 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -9,8 +9,9 @@ share/ozone/lib/aspectjweaver.jar share/ozone/lib/aws-java-sdk-core.jar share/ozone/lib/aws-java-sdk-kms.jar share/ozone/lib/aws-java-sdk-s3.jar -share/ozone/lib/bcpkix-jdk15on.jar -share/ozone/lib/bcprov-jdk15on.jar +share/ozone/lib/bcpkix-jdk18on.jar +share/ozone/lib/bcprov-jdk18on.jar +share/ozone/lib/bcutil-jdk18on.jar share/ozone/lib/bonecp.RELEASE.jar share/ozone/lib/cdi-api.jar share/ozone/lib/checker-qual.jar diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot index 7d9edcdef44..55ed9ddf504 100644 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot @@ -32,10 +32,12 @@ Get test user principal [return] ${user}/${instance}@EXAMPLE.COM Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster ${principal} = Get test user principal HTTP Wait Until Keyword Succeeds 2min 10sec Execute kinit -k -t /etc/security/keytabs/HTTP.keytab ${principal} Kinit test user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster [arguments] ${user} ${keytab} ${TEST_USER} = Get test user principal ${user} Set Suite Variable ${TEST_USER} diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot b/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot new file mode 100644 index 00000000000..a97fdda8f81 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot @@ -0,0 +1,75 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test freon ommg command +Resource ../ozone-lib/freon.robot +Test Timeout 5 minutes + +*** Variables *** +${PREFIX} ${EMPTY} +${n} 100 +${VOLUME} volume1 +${BUCKET_FSO} bucket-fso +${BUCKET_OBJ} bucket-obj + +*** Test Cases *** +[Setup] Create Volume and Buckets + ${result} = Execute ozone sh volume create /${VOLUME} + Should not contain ${result} Failed + ${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET_FSO} -l FILE_SYSTEM_OPTIMIZED + Should not contain ${result} Failed + ${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET_OBJ} -l OBJECT_STORE + Should not contain ${result} Failed + +[Read] Bucket Information + ${result} = Execute ozone freon ommg --operation INFO_BUCKET -n ${n} --bucket ${BUCKET_FSO} + Should contain ${result} Successful executions: ${n} + +[Create] File in FILE_SYSTEM_OPTIMIZED Bucket + ${result} = Execute ozone freon ommg --operation CREATE_FILE -n ${n} --size 4096 --volume ${VOLUME} --bucket ${BUCKET_FSO} + Should contain ${result} Successful executions: ${n} + +[Read] File in FILE_SYSTEM_OPTIMIZED Bucket + ${result} = Execute ozone freon ommg --operation READ_FILE -n ${n} --volume ${VOLUME} --bucket ${BUCKET_FSO} --size 4096 + Should contain ${result} Successful executions: ${n} + +[List] File Status in FILE_SYSTEM_OPTIMIZED Bucket + ${result} = Execute ozone freon ommg --operation LIST_STATUS -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_FSO} --batch-size ${n} + Should contain ${result} Successful executions: 1 + +[List] light File status in FILE_SYSTEM_OPTIMIZED Bucket + ${result} = Execute ozone freon ommg --operation LIST_STATUS_LIGHT -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_FSO} --batch-size ${n} + Should contain ${result} Successful executions: 1 + +[Create] Key in OBJECT_STORE Bucket + ${result} = Execute ozone freon ommg --operation CREATE_KEY -n ${n} --size 4096 --volume ${VOLUME} --bucket ${BUCKET_OBJ} + Should contain ${result} Successful executions: ${n} + +[Read] Key in OBJECT_STORE Bucket + ${result} = Execute ozone freon ommg --operation READ_KEY -n ${n} --volume ${VOLUME} --bucket ${BUCKET_OBJ} --size 4096 + Should contain ${result} Successful executions: ${n} + +[List] Keys in OBJECT_STORE Bucket + ${result} = Execute ozone freon ommg --operation LIST_KEYS -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_OBJ} --batch-size ${n} + Should contain ${result} Successful executions: 1 + +[List] Light Keys in OBJECT_STORE Bucket + ${result} = Execute ozone freon ommg --operation LIST_KEYS_LIGHT -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_OBJ} --batch-size ${n} + Should contain ${result} Successful executions: 1 + +[Get] Key Information in OBJECT_STORE Bucket + ${result} = Execute ozone freon ommg --operation GET_KEYINFO -n ${n} --volume ${VOLUME} --bucket ${BUCKET_OBJ} + Should contain ${result} Successful executions: ${n} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index c0b2c9f7bfa..840fb963d8d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -207,3 +207,9 @@ Verify Multipart Upload ${tmp} = Catenate @{files} Execute cat ${tmp} > /tmp/original${random} Compare files /tmp/original${random} /tmp/verify${random} + +Revoke S3 secrets + Execute and Ignore Error ozone s3 revokesecret -y + Execute and Ignore Error ozone s3 revokesecret -y -u testuser + Execute and Ignore Error ozone s3 revokesecret -y -u testuser2 + diff 
--git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index b9f6993f45e..70dcfa1abed 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -21,30 +21,37 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... AND Revoke S3 secrets +Test Teardown Run Keyword Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 +${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Generate Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Secret Already Exists + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret + Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Generate Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 27b4580f419..0f15f23067b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -21,8 +21,9 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... 
AND Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 @@ -31,19 +32,19 @@ ${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Revoke Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Revoke Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END \ No newline at end of file + Should contain ${result} HTTP/1.1 200 OK ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh index 392372bfc12..f9994438c2f 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh +++ b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh @@ -1428,12 +1428,13 @@ function ozone_set_module_access_args # populate JVM args based on java version if [[ "${JAVA_MAJOR_VERSION}" -ge 17 ]]; then - OZONE_MODULE_ACCESS_ARGS="--add-opens java.base/java.lang=ALL-UNNAMED" OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED" OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED" fi if [[ "${JAVA_MAJOR_VERSION}" -ge 9 ]]; then OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.nio=ALL-UNNAMED" + OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.lang=ALL-UNNAMED" + OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.lang.reflect=ALL-UNNAMED" fi } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index fa6e0ae5756..604608a07fb 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -34,21 +34,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - - junit - junit - test - org.junit.jupiter junit-jupiter-engine test - - org.junit.vintage - junit-vintage-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java 
b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 1ffed5323aa..26f896663b8 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -63,7 +63,7 @@ public class MiniOzoneChaosCluster extends MiniOzoneHAClusterImpl { private final FailureManager failureManager; - private final int waitForClusterToBeReadyTimeout = 120000; // 2 min + private static final int WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT = 120000; // 2 min private final Set failedOmSet; private final Set failedScmSet; @@ -158,7 +158,7 @@ public void waitForClusterToBeReady() } } return true; - }, 1000, waitForClusterToBeReadyTimeout); + }, 1000, WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT); } /** @@ -232,7 +232,7 @@ public Builder addFailures(Class clazz) { protected void initializeConfiguration() throws IOException { super.initializeConfiguration(); - OzoneClientConfig clientConfig = new OzoneClientConfig(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024); clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024); clientConfig.setStreamBufferSize(4 * 1024); diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index dcd03c04fa8..f5e044ddac2 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Insight Tool jar - false diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java index b2d68545d06..85faf99419a 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.ozone.insight; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.HashMap; import java.util.Map; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test common insight point utility methods. 
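One behavioral note on the MiniOzoneChaosCluster hunk above: replacing new OzoneClientConfig() with conf.getObject(OzoneClientConfig.class) binds the typed client config from the values already present in the cluster's OzoneConfiguration instead of starting from defaults, so only the stream-buffer fields set afterwards are overridden. A rough sketch of that get/modify/write-back pattern follows; the setFromObject write-back is an assumption here, it is not shown in the hunk above:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;

public final class ClientConfigTuning {
  private ClientConfigTuning() { }

  static void tuneStreamBuffers(OzoneConfiguration conf) {
    // Bind the typed config object from whatever is already set in conf,
    // rather than constructing a fresh instance with library defaults.
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    clientConfig.setStreamBufferSize(4 * 1024);
    clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024);
    clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024);
    // Assumed write-back step so later readers of conf see the tuned values.
    conf.setFromObject(clientConfig);
  }
}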
*/ @@ -42,14 +44,14 @@ public String getDescription() { Map filters = new HashMap<>(); filters.put("datanode", "123"); - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(filters, "This a log specific to [datanode=123]")); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=234]")); //with empty filters - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(new HashMap<>(), "This a log specific to [datanode=234]")); //with multiple filters @@ -57,14 +59,14 @@ public String getDescription() { filters.put("datanode", "123"); filters.put("pipeline", "abcd"); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=123]")); - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(filters, "This a log specific to [datanode=123] [pipeline=abcd]")); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=456] [pipeline=abcd]")); diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java index 9be82ebc41d..701652bee09 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java @@ -27,11 +27,12 @@ import org.apache.hadoop.hdds.conf.ConfigTag; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import static org.assertj.core.api.Assertions.assertThat; + /** * Test insight report which prints out configs. 
*/ @@ -60,12 +61,12 @@ public void testPrintConfig() throws UnsupportedEncodingException { subCommand.printConfig(CustomConfig.class, conf); final String output = out.toString(StandardCharsets.UTF_8.name()); - Assertions.assertTrue(output.contains(">>> ozone.scm.client.address")); - Assertions.assertTrue(output.contains("default: localhost")); - Assertions.assertTrue(output.contains("current: omclient")); - Assertions.assertTrue(output.contains(">>> ozone.scm.client.secure")); - Assertions.assertTrue(output.contains("default: true")); - Assertions.assertTrue(output.contains("current: true")); + assertThat(output).contains(">>> ozone.scm.client.address"); + assertThat(output).contains("default: localhost"); + assertThat(output).contains("current: omclient"); + assertThat(output).contains(">>> ozone.scm.client.secure"); + assertThat(output).contains("default: true"); + assertThat(output).contains("current: true"); } /** diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java index 01402085861..f895a91c537 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.ozone.insight; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Testing utility methods of the log subcommand test. */ @@ -36,6 +37,6 @@ public void filterLog() { + "storageLocation: \"/tmp/hadoop-neo/dfs/data\"\\n capacity: " + "250438021120\\n scmUsed: 16384\\n remaining: 212041244672\\n " + "storageType: DISK\\n failed: false\\n}\\n"); - Assertions.assertEquals(10, result.split("\n").length); + assertEquals(10, result.split("\n").length); } } diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml index 00f7f4daf1d..92ceb203b11 100644 --- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml @@ -105,16 +105,12 @@ - - - - - + @@ -130,7 +126,7 @@ - + diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 3eef8fa58c0..913cd639bf7 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -119,21 +119,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar test - - junit - junit - test - org.junit.jupiter junit-jupiter-engine test - - org.junit.vintage - junit-vintage-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index 2db8faaa6ea..51d75c07d2d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java index b4d494f771d..ff5ed3b0624 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java index 5e5c9173954..07405dc9cd7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.FileNotFoundException; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java index ca1a757e9d2..0d6c30e52c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java @@ -25,7 +25,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java index b3b91ce467a..c39a9be1619 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java index 7a606144851..8ea9357f23b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java index 02c419b09a1..2624605ed25 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java @@ -21,7 +21,7 @@ import java.nio.charset.StandardCharsets; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index 314d289c2a8..21290d1e889 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -33,7 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.createSubdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.iteratorToList; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java index fbe47302ffc..2bde7b757a1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LeaseRecoverable; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java index a64398a54f7..22f947abc64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 906c110d3dc..86363b55ccf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -30,7 +30,7 @@ import com.google.common.base.Charsets; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java index 2527aacfd9e..166e8e301e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java @@ -45,7 +45,7 @@ import static org.assertj.core.api.Assertions.fail; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Test Open operations. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java index 6ae118d3463..96ecb01bfac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 8721951e656..3ff3f72cc6e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java index 72d0dce9ff9..88666ee8a95 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.contract; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.SafeMode; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index 
c9c51f360fd..618025dc06f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.contract; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; @@ -25,13 +26,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.EOFException; import java.io.IOException; -import java.util.Random; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable { byte[] buf = dataset(filesize, 0, 255); Path randomSeekFile = path("testrandomseeks.bin"); createFile(getFileSystem(), randomSeekFile, true, buf); - Random r = new Random(); // Record the sequence of seeks and reads which trigger a failure. int[] seeks = new int[10]; int[] reads = new int[10]; try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) { for (int i = 0; i < limit; i++) { - int seekOff = r.nextInt(buf.length); - int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000)); + int seekOff = RandomUtils.nextInt(0, buf.length); + int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000)); seeks[i % seeks.length] = seekOff; reads[i % reads.length] = toRead; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java index f9267dbf519..b9a86ae366c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java @@ -21,7 +21,7 @@ import java.io.FileNotFoundException; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 6312bd6060a..07c4f26543a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.contract; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index 586364eb076..b34e945a3dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -22,19 +22,11 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.ozone.test.JUnit5AwareTimeout; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.TestInfo; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,12 +70,8 @@ public abstract class AbstractFSContractTestBase implements ContractOptions { */ private Path testPath; - @Rule - public TestName methodName = new TestName(); - private String testMethodName; - @BeforeClass @BeforeAll public static void nameTestThread() { Thread.currentThread().setName("JUnit"); @@ -93,12 +81,6 @@ private void nameThread() { Thread.currentThread().setName("JUnit-" + getMethodName()); } - @Before - public void getTestMethodName() { - testMethodName = methodName.getMethodName(); - nameThread(); - } - @BeforeEach void getTestMethodName(TestInfo testInfo) { testInfo.getTestMethod().ifPresent(m -> testMethodName = m.getName()); @@ -179,12 +161,6 @@ protected Configuration createConfiguration() { return new Configuration(); } - /** - * Set the timeout for every test. - */ - @Rule - public TestRule testTimeout = new JUnit5AwareTimeout(new Timeout(getTestTimeoutMillis())); - /** * Option for tests to override the default timeout value. * @return the current test timeout @@ -198,7 +174,6 @@ protected int getTestTimeoutMillis() { * Setup: create the contract then init it. * @throws Exception on any failure */ - @Before @BeforeEach public void setup() throws Exception { Thread.currentThread().setName("setup"); @@ -231,7 +206,6 @@ public void setup() throws Exception { * Teardown. 
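The AbstractFSContractTestBase hunk above removes the JUnit 4 @Rule machinery (TestName, the Timeout rule, @Before/@After) now that the class runs purely on JUnit 5 lifecycle annotations. A minimal sketch of the TestInfo-based replacement for the old TestName rule, independent of the Ozone classes:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;

class MethodNameCaptureTest {

  private String testMethodName;

  @BeforeEach
  void captureMethodName(TestInfo testInfo) {
    // JUnit 5 injects TestInfo into lifecycle methods; the resolved method name
    // takes over the role of the JUnit 4 TestName rule.
    testInfo.getTestMethod().ifPresent(m -> testMethodName = m.getName());
    Thread.currentThread().setName("JUnit-" + testMethodName);
  }

  @Test
  void methodNameIsAvailable() {
    assertEquals("methodNameIsAvailable", testMethodName);
  }
}

Per-test timeouts, previously supplied by the removed Timeout rule, would typically be handled in JUnit 5 with the @Timeout annotation or surefire configuration; this patch does not show which of those the project relies on.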
* @throws Exception on any failure */ - @After @AfterEach public void teardown() throws Exception { Thread.currentThread().setName("teardown"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index b62606a6865..a9fc2710ce3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -117,6 +117,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -393,7 +394,7 @@ private void checkInvalidPath(Path path) { InvalidPathException pathException = assertThrows( InvalidPathException.class, () -> fs.create(path, false) ); - assertTrue(pathException.getMessage().contains("Invalid path Name")); + assertThat(pathException.getMessage()).contains("Invalid path Name"); } @Test @@ -467,7 +468,7 @@ public void testRecursiveDelete() throws Exception { // delete a dir with sub-file try { FileStatus[] parents = fs.listStatus(grandparent); - assertTrue(parents.length > 0); + assertThat(parents.length).isGreaterThan(0); fs.delete(parents[0].getPath(), false); fail("Must throw exception as dir is not empty!"); } catch (PathIsNotEmptyDirectoryException pde) { @@ -538,8 +539,8 @@ private void checkPath(Path path) { fs.getFileStatus(path); fail("testRecursiveDelete failed"); } catch (IOException ex) { - assertTrue(ex instanceof FileNotFoundException); - assertTrue(ex.getMessage().contains("No such file or directory")); + assertInstanceOf(FileNotFoundException.class, ex); + assertThat(ex.getMessage()).contains("No such file or directory"); } } @@ -749,7 +750,7 @@ public void testListStatusOnLargeDirectory() throws Exception { assertEquals(numDirs, fileStatuses.length, "Total directories listed do not match the existing directories"); for (int i = 0; i < numDirs; i++) { - assertTrue(paths.contains(fileStatuses[i].getPath().getName())); + assertThat(paths).contains(fileStatuses[i].getPath().getName()); } } @@ -1004,7 +1005,7 @@ public void testSeekOnFileLength() throws IOException { fs.open(fileNotExists); fail("Should throw FileNotFoundException as file doesn't exist!"); } catch (FileNotFoundException fnfe) { - assertTrue(fnfe.getMessage().contains("KEY_NOT_FOUND"), "Expected KEY_NOT_FOUND error"); + assertThat(fnfe.getMessage()).contains("KEY_NOT_FOUND"); } } @@ -1027,12 +1028,16 @@ public void testAllocateMoreThanOneBlock() throws IOException { FileStatus fileStatus = fs.getFileStatus(file); long blkSize = fileStatus.getBlockSize(); long fileLength = fileStatus.getLen(); - assertTrue(fileLength > blkSize, "Block allocation should happen"); + assertThat(fileLength) + .withFailMessage("Block allocation should happen") + .isGreaterThan(blkSize); long newNumBlockAllocations = cluster.getOzoneManager().getMetrics().getNumBlockAllocates(); - assertTrue((newNumBlockAllocations > numBlockAllocationsOrg), "Block allocation should happen"); + 
assertThat(newNumBlockAllocations) + .withFailMessage("Block allocation should happen") + .isGreaterThan(numBlockAllocationsOrg); stream.seek(fileLength); assertEquals(-1, stream.read()); @@ -1367,7 +1372,7 @@ public void testRenameDir() throws Exception { IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest)); - assertTrue(exception.getMessage().contains("Wrong FS")); + assertThat(exception.getMessage()).contains("Wrong FS"); } private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) @@ -1420,7 +1425,7 @@ public void testGetDirectoryModificationTime() for (int i = 0; i < 5; i++) { Thread.sleep(10); fileStatuses = o3fs.listStatus(mdir1); - assertTrue(modificationTime <= fileStatuses[0].getModificationTime()); + assertThat(modificationTime).isLessThanOrEqualTo(fileStatuses[0].getModificationTime()); } } @@ -1818,7 +1823,7 @@ public void testOzFsReadWrite() throws IOException { // The timestamp of the newly created file should always be greater than // the time when the test was started - assertTrue(status.getModificationTime() > currentTime); + assertThat(status.getModificationTime()).isGreaterThan(currentTime); assertFalse(status.isDirectory()); assertEquals(FsPermission.getFileDefault(), status.getPermission()); @@ -1969,7 +1974,7 @@ void testListStatus2() throws IOException { assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2); assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus()); for (Path p : paths) { - assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p))); + assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p)); } } @@ -2007,7 +2012,7 @@ void testOzoneManagerFileSystemInterface() throws IOException { // doesn't actually exist on server; if it exists, it will be a fixed value. // In this case, the dir key exists. 
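Most of the assertion churn in these filesystem tests follows one pattern: boolean assertTrue checks over strings and numbers become fluent AssertJ assertions, which report expected versus actual values on failure instead of a bare AssertionError. A small consolidated sketch of the idioms used in the hunks above and below:

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;

import java.io.FileNotFoundException;
import java.io.IOException;

final class AssertJIdioms {
  private AssertJIdioms() { }

  static void examples(String message, long fileLength, long blockSize, IOException ex) {
    // String containment: was assertTrue(message.contains(...)).
    assertThat(message).contains("Invalid path Name");

    // Numeric comparison with a custom message:
    // was assertTrue(fileLength > blockSize, "Block allocation should happen").
    assertThat(fileLength)
        .withFailMessage("Block allocation should happen")
        .isGreaterThan(blockSize);

    // Type check that also narrows the reference:
    // was assertTrue(ex instanceof FileNotFoundException) followed by a cast.
    FileNotFoundException fnfe = assertInstanceOf(FileNotFoundException.class, ex);
    assertThat(fnfe.getMessage()).contains("No such file or directory");
  }
}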
assertEquals(0, omStatus.getKeyInfo().getDataSize()); - assertTrue(omStatus.getKeyInfo().getModificationTime() <= currentTime); + assertThat(omStatus.getKeyInfo().getModificationTime()).isLessThanOrEqualTo(currentTime); assertEquals(new Path(omStatus.getPath()).getName(), o3fs.pathToKey(path)); } @@ -2021,13 +2026,12 @@ public void testOzoneManagerLocatedFileStatus() throws IOException { stream.writeBytes(data); } FileStatus status = fs.getFileStatus(path); - assertTrue(status instanceof LocatedFileStatus); - LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status; - assertTrue(locatedFileStatus.getBlockLocations().length >= 1); + LocatedFileStatus locatedFileStatus = assertInstanceOf(LocatedFileStatus.class, status); + assertThat(locatedFileStatus.getBlockLocations().length).isGreaterThanOrEqualTo(1); for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) { - assertTrue(blockLocation.getNames().length >= 1); - assertTrue(blockLocation.getHosts().length >= 1); + assertThat(blockLocation.getNames().length).isGreaterThanOrEqualTo(1); + assertThat(blockLocation.getHosts().length).isGreaterThanOrEqualTo(1); } } @@ -2047,8 +2051,7 @@ void testBlockOffsetsWithMultiBlockFile() throws Exception { stream.writeBytes(data); } FileStatus status = fs.getFileStatus(path); - assertTrue(status instanceof LocatedFileStatus); - LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status; + LocatedFileStatus locatedFileStatus = assertInstanceOf(LocatedFileStatus.class, status); BlockLocation[] blockLocations = locatedFileStatus.getBlockLocations(); assertEquals(0, blockLocations[0].getOffset()); @@ -2100,7 +2103,7 @@ void testFileSystemWithObjectStoreLayout() throws IOException { config.set(FS_DEFAULT_NAME_KEY, obsRootPath); IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config)); - assertTrue(e.getMessage().contains("OBJECT_STORE, which does not support file system semantics")); + assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java index 2d4c310c886..f0ff1ab43b4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java @@ -45,8 +45,10 @@ import java.io.IOException; import java.util.ArrayList; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -305,8 +307,8 @@ public void testRenameParentDirModificationTime() throws IOException { .getModificationTime(); // rename should change the parent directory of source and object files // modification time but not change modification time of the renamed file - assertTrue(dir1BeforeMTime < dir1AfterMTime); - assertTrue(dir2BeforeMTime < dir2AfterMTime); + assertThat(dir1BeforeMTime).isLessThan(dir1AfterMTime); + 
assertThat(dir2BeforeMTime).isLessThan(dir2AfterMTime); assertEquals(file1BeforeMTime, file1AfterMTime); // mv "/dir1/subdir1/" to "/dir2/subdir1/" @@ -323,8 +325,8 @@ public void testRenameParentDirModificationTime() throws IOException { dir2AfterMTime = getFs().getFileStatus(dir2).getModificationTime(); long subdir1AfterMTime = getFs().getFileStatus(renamedSubdir1) .getModificationTime(); - assertTrue(dir1BeforeMTime < dir1AfterMTime); - assertTrue(dir2BeforeMTime < dir2AfterMTime); + assertThat(dir1BeforeMTime).isLessThan(dir1AfterMTime); + assertThat(dir2BeforeMTime).isLessThan(dir2AfterMTime); assertEquals(subdir1BeforeMTime, subdir1AfterMTime); } @@ -379,7 +381,7 @@ private void renameAndAssert(OMMetadataManager omMgr, long bucketAfterMTime = omBucketInfo.getModificationTime(); long fileAfterMTime = getFs().getFileStatus(to).getModificationTime(); if (exceptChangeMtime) { - assertTrue(bucketBeforeMTime < bucketAfterMTime); + assertThat(bucketBeforeMTime).isLessThan(bucketAfterMTime); } else { assertEquals(bucketBeforeMTime, bucketAfterMTime); } @@ -434,7 +436,7 @@ public void testMultiLevelDirs() throws Exception { long d6ObjectID = verifyDirKey(volumeId, bucketId, d4ObjectID, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr); - assertTrue(d5ObjectID != d6ObjectID, "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID + assertNotEquals(d5ObjectID, d6ObjectID, "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID + "/d6] of same parent!"); assertEquals(6, getCluster().getOzoneManager().getMetrics().getNumKeys(), "Wrong OM numKeys metrics"); @@ -520,10 +522,10 @@ public void testFSDeleteLogWarnNoExist() throws Exception { GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer .captureLogs(BasicOzoneClientAdapterImpl.LOG); getFs().delete(new Path("/d1/d3/noexist/"), true); - assertTrue(logCapture.getOutput().contains( - "delete key failed Unable to get file status")); - assertTrue(logCapture.getOutput().contains( - "WARN ozone.BasicOzoneClientAdapterImpl")); + assertThat(logCapture.getOutput()).contains( + "delete key failed Unable to get file status"); + assertThat(logCapture.getOutput()).contains( + "WARN ozone.BasicOzoneClientAdapterImpl"); } private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName, @@ -546,7 +548,7 @@ long verifyDirKey(long volumeId, long bucketId, long parentId, " using dbKey: " + dbKey); assertEquals(parentId, dirInfo.getParentObjectID(), "Parent Id mismatches"); assertEquals(dirKey, dirInfo.getName(), "Mismatches directory name"); - assertTrue(dirInfo.getCreationTime() > 0, "Mismatches directory creation time param"); + assertThat(dirInfo.getCreationTime()).isGreaterThan(0); assertEquals(dirInfo.getCreationTime(), dirInfo.getModificationTime()); return dirInfo.getObjectID(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 9bd1025e694..d44342acc43 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -96,7 +96,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Optional; -import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -125,6 +124,7 @@ import static 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -776,7 +776,7 @@ void testListStatusOnLargeDirectory() throws Exception { assertEquals(numDirs, fileStatuses.length, "Total directories listed do not match the existing directories"); for (int i = 0; i < numDirs; i++) { - assertTrue(paths.contains(fileStatuses[i].getPath().getName())); + assertThat(paths).contains(fileStatuses[i].getPath().getName()); } } finally { // Cleanup @@ -1109,7 +1109,7 @@ void testListStatusRootAndVolumeRecursive() throws IOException { */ private FileStatus[] customListStatus(Path f, boolean recursive, String startPath, int numEntries) throws IOException { - assertTrue(numEntries > 0); + assertThat(numEntries).isGreaterThan(0); LinkedList statuses = new LinkedList<>(); List tmpStatusList; do { @@ -1486,9 +1486,9 @@ void testSymlinkList() throws Exception { new GenericTestUtils.SystemOutCapturer()) { String linkPathStr = rootPath + destVolume; ToolRunner.run(shell, new String[]{"-ls", linkPathStr}); - assertTrue(capture.getOutput().contains("drwxrwxrwx")); - assertTrue(capture.getOutput().contains(linkPathStr + - OZONE_URI_DELIMITER + srcBucket)); + assertThat(capture.getOutput()).contains("drwxrwxrwx"); + assertThat(capture.getOutput()).contains(linkPathStr + + OZONE_URI_DELIMITER + srcBucket); } finally { shell.close(); } @@ -1509,12 +1509,12 @@ void testSymlinkList() throws Exception { String linkPathStr = rootPath + destVolume; ToolRunner.run(shell, new String[]{"-ls", "-R", linkPathStr + OZONE_URI_DELIMITER + srcBucket}); - assertTrue(capture.getOutput().contains("drwxrwxrwx")); - assertTrue(capture.getOutput().contains(linkPathStr + - OZONE_URI_DELIMITER + srcBucket)); - assertTrue(capture.getOutput().contains("-rw-rw-rw-")); - assertTrue(capture.getOutput().contains(linkPathStr + - OZONE_URI_DELIMITER + srcBucket + OZONE_URI_DELIMITER + key)); + assertThat(capture.getOutput()).contains("drwxrwxrwx"); + assertThat(capture.getOutput()).contains(linkPathStr + + OZONE_URI_DELIMITER + srcBucket); + assertThat(capture.getOutput()).contains("-rw-rw-rw-"); + assertThat(capture.getOutput()).contains(linkPathStr + + OZONE_URI_DELIMITER + srcBucket + OZONE_URI_DELIMITER + key); } finally { shell.close(); } @@ -1678,7 +1678,7 @@ void testDeleteBucketLink() throws Exception { // confirm link is gone FileNotFoundException exception = assertThrows(FileNotFoundException.class, () -> fs.getFileStatus(dirPathLink)); - assertTrue(exception.getMessage().contains("File not found.")); + assertThat(exception.getMessage()).contains("File not found."); // Cleanup fs.delete(bucketPath1, true); @@ -1928,15 +1928,15 @@ void testTrash() throws Exception { }, 1000, 180000); if (isBucketFSOptimized) { - assertTrue(getOMMetrics() - .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames); + assertThat(getOMMetrics().getNumTrashAtomicDirRenames()) + .isGreaterThan(prevNumTrashAtomicDirRenames); } else { // This condition should pass after the checkpoint - assertTrue(getOMMetrics() - .getNumTrashRenames() > prevNumTrashRenames); + assertThat(getOMMetrics().getNumTrashRenames()) + 
.isGreaterThan(prevNumTrashRenames); // With new layout version, file renames wouldn't be counted - assertTrue(getOMMetrics() - .getNumTrashFilesRenames() > prevNumTrashFileRenames); + assertThat(getOMMetrics().getNumTrashFilesRenames()) + .isGreaterThan(prevNumTrashFileRenames); } // wait for deletion of checkpoint dir @@ -1995,13 +1995,13 @@ void testCreateWithInvalidPaths() { private void checkInvalidPath(Path path) { InvalidPathException exception = assertThrows(InvalidPathException.class, () -> fs.create(path, false)); - assertTrue(exception.getMessage().contains("Invalid path Name")); + assertThat(exception.getMessage()).contains("Invalid path Name"); } @Test void testRenameFile() throws Exception { - final String dir = "/dir" + new Random().nextInt(1000); + final String dir = "/dir" + RandomUtils.nextInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); Path file1Source = new Path(getBucketPath() + dir + "/file1_Copy"); @@ -2027,7 +2027,7 @@ void testRenameFile() throws Exception { */ @Test void testRenameFileToDir() throws Exception { - final String dir = "/dir" + new Random().nextInt(1000); + final String dir = "/dir" + RandomUtils.nextInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); @@ -2447,7 +2447,7 @@ void testSnapshotDiff() throws Exception { IllegalArgumentException.class, () -> ofs.getSnapshotDiffReport(volumePath1, finalFromSnap, finalToSnap)); - assertTrue(exception.getMessage().contains(errorMsg)); + assertThat(exception.getMessage()).contains(errorMsg); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index d5c042bb036..87f114bd711 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -57,7 +57,9 @@ import java.util.concurrent.TimeoutException; import java.util.function.LongSupplier; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -177,7 +179,7 @@ public void testDeleteEmptyDirectory() throws Exception { assertEquals(root.getName(), iterator.next().getValue().getName()); } - assertTrue(dirDeletingService.getRunCount().get() > 1); + assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); } /** @@ -244,9 +246,9 @@ public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception { assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19); long elapsedRunCount = dirDeletingService.getRunCount().get() - preRunCount; - assertTrue(dirDeletingService.getRunCount().get() > 1); + assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); // Ensure dir deleting speed, here provide a backup value for safe CI - assertTrue(elapsedRunCount >= 7); + assertThat(elapsedRunCount).isGreaterThanOrEqualTo(7); } @Test @@ -295,7 +297,7 @@ public void testDeleteWithMultiLevels() throws Exception { assertSubPathsCount(dirDeletingService::getMovedDirsCount, 2); 
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5); - assertTrue(dirDeletingService.getRunCount().get() > 1); + assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); } @Test @@ -545,13 +547,8 @@ private boolean assertTableRowCount(int expectedCount, } private void checkPath(Path path) { - try { - fs.getFileStatus(path); - fail("testRecursiveDelete failed"); - } catch (IOException ex) { - assertTrue(ex instanceof FileNotFoundException); - assertTrue(ex.getMessage().contains("No such file or directory")); - } + FileNotFoundException ex = assertThrows(FileNotFoundException.class, () -> fs.getFileStatus(path)); + assertThat(ex.getMessage()).contains("No such file or directory"); } private static BucketLayout getFSOBucketLayout() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index 8d7439604e8..a8c450e3cc9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -21,6 +21,8 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; import java.util.HashMap; @@ -28,29 +30,37 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.crypto.Encryptor; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.storage.BlockInputStream; +import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StreamCapabilities; + +import 
org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -58,8 +68,10 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; import org.apache.hadoop.ozone.client.io.KeyOutputStream; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -82,11 +94,16 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; @@ -96,11 +113,13 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -120,12 +139,13 @@ public class TestHSync { private static OzoneClient client; private static final BucketLayout BUCKET_LAYOUT = BucketLayout.FILE_SYSTEM_OPTIMIZED; + private static final int CHUNK_SIZE = 4 << 12; + private static final int FLUSH_SIZE = 2 * CHUNK_SIZE; + private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE; + private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; + @BeforeAll public static void init() throws Exception { - final int chunkSize = 4 << 10; - final int flushSize = 2 * chunkSize; - final int maxFlushSize = 2 * flushSize; - final int blockSize = 2 * maxFlushSize; final BucketLayout layout = BUCKET_LAYOUT; CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); @@ -133,17 +153,21 @@ public static void init() throws Exception { CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); // Reduce KeyDeletingService interval CONF.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + CONF.setBoolean("ozone.client.incremental.chunk.list", true); + CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(BLOCK_SIZE) + .setChunkSize(CHUNK_SIZE) + .setStreamBufferFlushSize(FLUSH_SIZE) + 
.setStreamBufferMaxSize(MAX_FLUSH_SIZE) + .setDataStreamBufferFlushSize(MAX_FLUSH_SIZE) + .setDataStreamMinPacketSize(CHUNK_SIZE) + .setDataStreamWindowSize(5 * CHUNK_SIZE) + .applyTo(CONF); + cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) .setTotalPipelineNumLimit(10) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .setDataStreamMinPacketSize(chunkSize) - .setDataStreamStreamWindowSize(5 * chunkSize) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -155,6 +179,8 @@ public static void init() throws Exception { GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); } @AfterAll @@ -287,13 +313,15 @@ public void testKeyHSyncThenClose() throws Exception { } } - @Test - public void testO3fsHSync() throws Exception { + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testO3fsHSync(boolean incrementalChunkList) throws Exception { // Set the fs.defaultFS final String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + initClientConfig(incrementalChunkList); try (FileSystem fs = FileSystem.get(CONF)) { for (int i = 0; i < 10; i++) { final Path file = new Path("/file" + i); @@ -302,8 +330,10 @@ public void testO3fsHSync() throws Exception { } } - @Test - public void testOfsHSync() throws Exception { + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testOfsHSync(boolean incrementalChunkList) throws Exception { // Set the fs.defaultFS final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); @@ -312,6 +342,7 @@ public void testOfsHSync() throws Exception { final String dir = OZONE_ROOT + bucket.getVolumeName() + OZONE_URI_DELIMITER + bucket.getName(); + initClientConfig(incrementalChunkList); try (FileSystem fs = FileSystem.get(CONF)) { for (int i = 0; i < 10; i++) { final Path file = new Path(dir, "file" + i); @@ -429,13 +460,11 @@ public void testHsyncKeyCallCount() throws Exception { ThreadLocalRandom.current().nextBytes(data); final Path file = new Path(dir, "file-hsync-then-close"); - long blockSize; try (FileSystem fs = FileSystem.get(CONF)) { - blockSize = fs.getDefaultBlockSize(file); long fileSize = 0; try (FSDataOutputStream outputStream = fs.create(file, true)) { // make sure at least writing 2 blocks data - while (fileSize <= blockSize) { + while (fileSize <= BLOCK_SIZE) { outputStream.write(data, 0, data.length); outputStream.hsync(); fileSize += data.length; @@ -448,9 +477,9 @@ public void testHsyncKeyCallCount() throws Exception { omMetrics.resetNumKeyHSyncs(); long writtenSize = 0; try (OzoneOutputStream outputStream = bucket.createKey("key-" + RandomStringUtils.randomNumeric(5), - blockSize * 2, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) { + BLOCK_SIZE * 2, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) { // make sure at least writing 2 blocks data - while (writtenSize <= blockSize) { + while (writtenSize <= BLOCK_SIZE) { 
outputStream.write(data, 0, data.length); outputStream.hsync(); writtenSize += data.length; @@ -733,4 +762,117 @@ private void testEncryptedStreamCapabilities(boolean isEC) throws IOException, assertFalse(cofsos.hasCapability(StreamCapabilities.HFLUSH)); } } + + public void initClientConfig(boolean incrementalChunkList) { + OzoneClientConfig clientConfig = CONF.getObject(OzoneClientConfig.class); + clientConfig.setIncrementalChunkList(incrementalChunkList); + clientConfig.setChecksumType(ContainerProtos.ChecksumType.CRC32C); + CONF.setFromObject(clientConfig); + } + + public static Stream parameters1() { + return Stream.of( + arguments(true, 512), + arguments(true, 511), + arguments(true, 513), + arguments(false, 512), + arguments(false, 511), + arguments(false, 513) + ); + } + + @ParameterizedTest + @MethodSource("parameters1") + public void writeWithSmallBuffer(boolean incrementalChunkList, int bufferSize) + throws IOException { + initClientConfig(incrementalChunkList); + + final String keyName = UUID.randomUUID().toString(); + int fileSize = 16 << 11; + String s = RandomStringUtils.randomAlphabetic(bufferSize); + ByteBuffer byteBuffer = ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)); + + int writtenSize = 0; + try (OzoneOutputStream out = bucket.createKey(keyName, fileSize, + ReplicationConfig.getDefault(CONF), new HashMap<>())) { + while (writtenSize < fileSize) { + int len = Math.min(bufferSize, fileSize - writtenSize); + out.write(byteBuffer, 0, len); + out.hsync(); + writtenSize += bufferSize; + } + } + + OzoneKeyDetails keyInfo = bucket.getKey(keyName); + assertEquals(fileSize, keyInfo.getDataSize()); + + int readSize = 0; + try (OzoneInputStream is = bucket.readKey(keyName)) { + while (readSize < fileSize) { + int len = Math.min(bufferSize, fileSize - readSize); + ByteBuffer readBuffer = ByteBuffer.allocate(len); + int readLen = is.read(readBuffer); + assertEquals(len, readLen); + if (len < bufferSize) { + for (int i = 0; i < len; i++) { + assertEquals(readBuffer.array()[i], byteBuffer.array()[i]); + } + } else { + assertArrayEquals(readBuffer.array(), byteBuffer.array()); + } + readSize += readLen; + } + } + bucket.deleteKey(keyName); + } + + public static Stream parameters2() { + return Stream.of( + arguments(true, 1024 * 1024 + 1), + arguments(true, 1024 * 1024 + 1 + CHUNK_SIZE), + arguments(true, 1024 * 1024 - 1 + CHUNK_SIZE), + arguments(false, 1024 * 1024 + 1), + arguments(false, 1024 * 1024 + 1 + CHUNK_SIZE), + arguments(false, 1024 * 1024 - 1 + CHUNK_SIZE) + ); + } + + @ParameterizedTest + @MethodSource("parameters2") + public void writeWithBigBuffer(boolean incrementalChunkList, int bufferSize) + throws IOException { + initClientConfig(incrementalChunkList); + + final String keyName = UUID.randomUUID().toString(); + int count = 2; + int fileSize = bufferSize * count; + ByteBuffer byteBuffer = ByteBuffer.allocate(bufferSize); + + try (OzoneOutputStream out = bucket.createKey(keyName, fileSize, + ReplicationConfig.getDefault(CONF), new HashMap<>())) { + for (int i = 0; i < count; i++) { + out.write(byteBuffer); + out.hsync(); + } + } + + OzoneKeyDetails keyInfo = bucket.getKey(keyName); + assertEquals(fileSize, keyInfo.getDataSize()); + int totalReadLen = 0; + try (OzoneInputStream is = bucket.readKey(keyName)) { + + for (int i = 0; i < count; i++) { + ByteBuffer readBuffer = ByteBuffer.allocate(bufferSize); + int readLen = is.read(readBuffer); + if (bufferSize != readLen) { + throw new IOException("failed to read " + bufferSize + " from offset " + totalReadLen + + ", 
actually read " + readLen + ", block " + totalReadLen / + BLOCK_SIZE); + } + assertArrayEquals(byteBuffer.array(), readBuffer.array()); + totalReadLen += readLen; + } + } + bucket.deleteKey(keyName); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 93775f40136..4b45bb5fa0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs.ozone; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -26,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneTestUtils; @@ -118,17 +119,19 @@ public void init() throws IOException, InterruptedException, conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); // make sure flush will write data to DN conf.setBoolean("ozone.client.stream.buffer.flush.delay", false); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .setDataStreamBufferFlushSize(maxFlushSize) + .setDataStreamMinPacketSize(chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(conf); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .setTotalPipelineNumLimit(10) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .setDataStreamMinPacketSize(chunkSize) - .setDataStreamStreamWindowSize(5 * chunkSize) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index 23d71047ef8..c6893c57e96 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -35,9 +35,11 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; import 
org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; @@ -52,9 +54,10 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; /** * Test OzoneFSInputStream by reading through multiple interfaces. @@ -82,12 +85,16 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.LEGACY.name()); + + ClientConfigForTesting.newBuilder(StorageUnit.MB) + .setChunkSize(2) + .setBlockSize(8) + .setStreamBufferFlushSize(2) + .setStreamBufferMaxSize(4) + .applyTo(conf); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setChunkSize(2) // MB - .setBlockSize(8) // MB - .setStreamBufferFlushSize(2) // MB - .setStreamBufferMaxSize(4) // MB .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -147,11 +154,11 @@ public void testO3FSSingleByteRead() throws IOException { break; } value[i] = (byte) val; - Assertions.assertEquals(value[i], data[i], "value mismatch at:" + i); + assertEquals(value[i], data[i], "value mismatch at:" + i); i++; } - Assertions.assertEquals(i, data.length); - Assertions.assertArrayEquals(value, data); + assertEquals(i, data.length); + assertArrayEquals(value, data); } } @@ -169,8 +176,8 @@ public void testO3FSMultiByteRead() throws IOException { System.arraycopy(tmp, 0, value, i * tmp.length, tmp.length); i++; } - Assertions.assertEquals((long) i * tmp.length, data.length); - Assertions.assertArrayEquals(value, data); + assertEquals((long) i * tmp.length, data.length); + assertArrayEquals(value, data); } } @@ -181,12 +188,12 @@ public void testO3FSByteBufferRead() throws IOException { ByteBuffer buffer = ByteBuffer.allocate(1024 * 1024); int byteRead = inputStream.read(buffer); - Assertions.assertEquals(byteRead, 1024 * 1024); + assertEquals(byteRead, 1024 * 1024); byte[] value = new byte[1024 * 1024]; System.arraycopy(data, 0, value, 0, value.length); - Assertions.assertArrayEquals(value, buffer.array()); + assertArrayEquals(value, buffer.array()); } } @@ -208,7 +215,7 @@ public void testSequenceFileReaderSync() throws IOException { in.sync(0); blockStart = in.getPosition(); // The behavior should be consistent with HDFS - Assertions.assertEquals(srcfile.length(), blockStart); + assertEquals(srcfile.length(), blockStart); in.close(); } @@ -230,7 +237,7 @@ public void testSequenceFileReaderSyncEC() throws IOException { in.sync(0); blockStart = in.getPosition(); // The behavior should be consistent with HDFS - Assertions.assertEquals(srcfile.length(), blockStart); + assertEquals(srcfile.length(), blockStart); in.close(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 71d1e4bdddd..6dccd604208 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -47,7 +47,6 @@ import org.junit.jupiter.api.Timeout; import java.io.FileNotFoundException; -import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; @@ -60,11 +59,11 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** * Class tests create with object store and getFileStatus. @@ -264,16 +263,8 @@ public void testKeyCreationFailDuetoDirectoryCreationBeforeCommit() // Before close create directory with same name. o3fs.mkdirs(new Path("/a/b/c")); - - try { - ozoneOutputStream.close(); - fail("testKeyCreationFailDuetoDirectoryCreationBeforeCommit"); - } catch (IOException ex) { - assertTrue(ex instanceof OMException); - assertEquals(NOT_A_FILE, - ((OMException) ex).getResult()); - } - + OMException ex = assertThrows(OMException.class, () -> ozoneOutputStream.close()); + assertEquals(NOT_A_FILE, ex.getResult()); } @@ -308,14 +299,10 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); // Should fail, as we have directory with same name. - try { - ozoneBucket.completeMultipartUpload(keyName, - omMultipartInfo.getUploadID(), partsMap); - fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed"); - } catch (OMException ex) { - assertTrue(ex instanceof OMException); - assertEquals(NOT_A_FILE, ex.getResult()); - } + OMException ex = assertThrows(OMException.class, () -> ozoneBucket.completeMultipartUpload(keyName, + omMultipartInfo.getUploadID(), partsMap)); + assertEquals(NOT_A_FILE, ex.getResult()); + // Delete directory o3fs.delete(new Path(keyName), true); @@ -338,25 +325,16 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() public void testCreateDirectoryFirstThenKeyAndFileWithSameName() throws Exception { o3fs.mkdirs(new Path("/t1/t2")); - - try { - o3fs.create(new Path("/t1/t2")); - fail("testCreateDirectoryFirstThenFileWithSameName failed"); - } catch (FileAlreadyExistsException ex) { - assertTrue(ex.getMessage().contains(NOT_A_FILE.name())); - } + FileAlreadyExistsException e = + assertThrows(FileAlreadyExistsException.class, () -> o3fs.create(new Path("/t1/t2"))); + assertThat(e.getMessage()).contains(NOT_A_FILE.name()); OzoneVolume ozoneVolume = client.getObjectStore().getVolume(volumeName); OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); ozoneBucket.createDirectory("t1/t2"); - try { - ozoneBucket.createKey("t1/t2", 0); - fail("testCreateDirectoryFirstThenFileWithSameName failed"); - } catch (OMException ex) { - assertTrue(ex instanceof OMException); - assertEquals(NOT_A_FILE, ex.getResult()); - } + OMException ex = assertThrows(OMException.class, () -> ozoneBucket.createKey("t1/t2", 0)); + assertEquals(NOT_A_FILE, ex.getResult()); } @@ -463,7 +441,7 @@ private void checkPath(Path path) { FileNotFoundException ex = assertThrows(FileNotFoundException.class, () -> o3fs.getFileStatus(path), 
"testObjectStoreCreateWithO3fs failed for Path" + path); - assertTrue(ex.getMessage().contains("No such file or directory")); + assertThat(ex.getMessage()).contains("No such file or directory"); } private void checkAncestors(Path p) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java index 6c0ecff0db8..5aba83bd412 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -25,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; @@ -40,9 +42,9 @@ import java.io.IOException; +import static org.assertj.core.api.Assertions.assertThat; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test OM Metrics for OzoneFileSystem operations. @@ -72,12 +74,16 @@ public static void init() throws Exception { conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.LEGACY.name()); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + + ClientConfigForTesting.newBuilder(StorageUnit.MB) + .setChunkSize(2) + .setBlockSize(8) + .setStreamBufferFlushSize(2) + .setStreamBufferMaxSize(4) + .applyTo(conf); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setChunkSize(2) // MB - .setBlockSize(8) // MB - .setStreamBufferFlushSize(2) // MB - .setStreamBufferMaxSize(4) // MB .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -149,13 +155,13 @@ private void testOzoneFileCommit(TestOps op) throws Exception { long numKeysAfterCommit = cluster .getOzoneManager().getMetrics().getNumKeys(); - assertTrue(numKeysAfterCommit > 0); + assertThat(numKeysAfterCommit).isGreaterThan(0); assertEquals(numKeysBeforeCreate + 2, numKeysAfterCommit); fs.delete(parentDir, true); long numKeysAfterDelete = cluster .getOzoneManager().getMetrics().getNumKeys(); - assertTrue(numKeysAfterDelete >= 0); + assertThat(numKeysAfterDelete).isGreaterThanOrEqualTo(0); assertEquals(numKeysBeforeCreate, numKeysAfterDelete); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java index d03cd4c81ce..228a820ed62 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java @@ -38,8 +38,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests OFS behavior when filesystem paths are enabled and parent directory is @@ -111,8 +111,8 @@ public void testCloseFileWithDeletedParent() throws Exception { // Close should throw exception, Since parent doesn't exist. OMException omException = assertThrows(OMException.class, stream::close); - assertTrue(omException.getMessage().contains("Cannot create file : " + - "parent/file as parent directory doesn't exist")); + assertThat(omException.getMessage()) + .contains("Cannot create file : " + "parent/file as parent directory doesn't exist"); } /** @@ -132,8 +132,8 @@ public void testCloseFileWithRenamedParent() throws Exception { // Close should throw exception, Since parent has been moved. OMException omException = assertThrows(OMException.class, stream::close); - assertTrue(omException.getMessage().contains("Cannot create file : " + - "parent/file as parent directory doesn't exist")); + assertThat(omException.getMessage()) + .contains("Cannot create file : " + "parent/file as parent directory doesn't exist"); fs.delete(renamedPath, true); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java index 972ad7dd2e7..37116f33e27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java @@ -35,11 +35,12 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; import java.io.IOException; import java.net.URI; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test Ozone Prefix Parser. 
*/ @@ -115,18 +116,12 @@ public void testPrefixParsePath() throws Exception { private void assertPrefixStats(PrefixParser parser, int volumeCount, int bucketCount, int intermediateDirCount, int nonExistentDirCount, int fileCount, int dirCount) { - Assertions.assertEquals(volumeCount, - parser.getParserStats(PrefixParser.Types.VOLUME)); - Assertions.assertEquals(bucketCount, - parser.getParserStats(PrefixParser.Types.BUCKET)); - Assertions.assertEquals(intermediateDirCount, - parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY)); - Assertions.assertEquals(nonExistentDirCount, - parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY)); - Assertions.assertEquals(fileCount, - parser.getParserStats(PrefixParser.Types.FILE)); - Assertions.assertEquals(dirCount, - parser.getParserStats(PrefixParser.Types.DIRECTORY)); + assertEquals(volumeCount, parser.getParserStats(PrefixParser.Types.VOLUME)); + assertEquals(bucketCount, parser.getParserStats(PrefixParser.Types.BUCKET)); + assertEquals(intermediateDirCount, parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY)); + assertEquals(nonExistentDirCount, parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY)); + assertEquals(fileCount, parser.getParserStats(PrefixParser.Types.FILE)); + assertEquals(dirCount, parser.getParserStats(PrefixParser.Types.DIRECTORY)); } private void testPrefixParseWithInvalidPaths() throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 5d068546828..2a6c8c456b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -22,14 +22,15 @@ import java.io.OutputStream; import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.client.io.SelectorOutputStream; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -55,6 +55,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; /** * Ozone file system tests with Streaming. 
@@ -84,17 +87,20 @@ public static void init() throws Exception { CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); - cluster = MiniOzoneCluster.newBuilder(CONF) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .setDataStreamBufferFlushSize(maxFlushSize) .setDataStreamMinPacketSize(chunkSize) - .setDataStreamStreamWindowSize(5 * chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(CONF); + + cluster = MiniOzoneCluster.newBuilder(CONF) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -152,7 +158,7 @@ static void createFile(FileSystem fs, Path path, boolean overwrite, final OutputStream wrapped = out.getWrappedStream(); LOG.info("wrapped: {}", wrapped.getClass()); - Assertions.assertEquals(SelectorOutputStream.class, wrapped.getClass()); + assertEquals(SelectorOutputStream.class, wrapped.getClass()); final SelectorOutputStream selector = (SelectorOutputStream) wrapped; final boolean belowThreshold = data.length <= AUTO_THRESHOLD; LOG.info("data.length={}, threshold={}, belowThreshold? {}", @@ -161,13 +167,12 @@ static void createFile(FileSystem fs, Path path, boolean overwrite, out.close(); final OutputStream underlying = selector.getUnderlying(); - Assertions.assertNotNull(underlying); + assertNotNull(underlying); LOG.info("underlying after close: {}", underlying.getClass()); if (belowThreshold) { - Assertions.assertTrue(underlying instanceof OzoneFSOutputStream); + assertInstanceOf(OzoneFSOutputStream.class, underlying); } else { - Assertions.assertEquals(OzoneFSDataStreamOutput.class, - underlying.getClass()); + assertEquals(OzoneFSDataStreamOutput.class, underlying.getClass()); } } @@ -177,10 +182,10 @@ static void assertUnderlying(SelectorOutputStream selector, LOG.info("underlying before close: {}", underlying != null ? 
underlying.getClass() : null); if (belowThreshold) { - Assertions.assertNull(underlying); + assertNull(underlying); } else { - Assertions.assertNotNull(underlying); - Assertions.assertEquals(OzoneFSDataStreamOutput.class, + assertNotNull(underlying); + assertEquals(OzoneFSDataStreamOutput.class, underlying.getClass()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index d639f2734fc..47dc9ac0c3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.ha.ConfUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.ozone.test.GenericTestUtils; @@ -46,19 +45,19 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; import java.util.Optional; import java.util.OptionalInt; -import java.util.UUID; import static org.apache.hadoop.hdds.HddsUtils.getHostName; import static org.apache.hadoop.hdds.HddsUtils.getHostPort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test client-side URI handling with Ozone Manager HA. @@ -74,10 +73,7 @@ public class TestOzoneFsHAURLs { private OzoneConfiguration conf; private static MiniOzoneCluster cluster; - private static String omId; private static String omServiceId; - private static String clusterId; - private static String scmId; private static OzoneManager om; private static int numOfOMs; @@ -85,47 +81,35 @@ public class TestOzoneFsHAURLs { private String bucketName; private String rootPath; - private final String o3fsImplKey = + private static final String O3FS_IMPL_KEY = "fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl"; - private final String o3fsImplValue = + private static final String O3FS_IMPL_VALUE = "org.apache.hadoop.fs.ozone.OzoneFileSystem"; private static OzoneClient client; - private final String ofsImplKey = + private static final String OFS_IMPL_KEY = "fs." 
+ OzoneConsts.OZONE_OFS_URI_SCHEME + ".impl"; - private final String ofsImplValue = + private static final String OFS_IMPL_VALUE = "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem"; @BeforeAll - public static void initClass() throws Exception { + static void initClass(@TempDir File tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - omId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omId); - java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.LEGACY.name()); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - OMStorage omStore = new OMStorage(conf); - omStore.setClusterId(clusterId); - // writes the version file properties - omStore.initialize(); - // Start the cluster cluster = MiniOzoneCluster.newOMHABuilder(conf) .setNumDatanodes(5) .setTotalPipelineNumLimit(3) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); @@ -220,7 +204,7 @@ private int getPortFromAddress(String addr) { public void testWithQualifiedDefaultFS() throws Exception { OzoneConfiguration clientConf = new OzoneConfiguration(conf); clientConf.setQuietMode(false); - clientConf.set(o3fsImplKey, o3fsImplValue); + clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE); // fs.defaultFS = o3fs://bucketName.volumeName.omServiceId/ clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); @@ -315,7 +299,7 @@ public void testWithQualifiedDefaultFS() throws Exception { private void testWithDefaultFS(String defaultFS) throws Exception { OzoneConfiguration clientConf = new OzoneConfiguration(conf); clientConf.setQuietMode(false); - clientConf.set(o3fsImplKey, o3fsImplValue); + clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE); // fs.defaultFS = file:/// clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS); @@ -360,8 +344,8 @@ public void testOtherDefaultFS() throws Exception { public void testIncorrectAuthorityInURI() throws Exception { OzoneConfiguration clientConf = new OzoneConfiguration(conf); clientConf.setQuietMode(false); - clientConf.set(o3fsImplKey, o3fsImplValue); - clientConf.set(ofsImplKey, ofsImplValue); + clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE); + clientConf.set(OFS_IMPL_KEY, OFS_IMPL_VALUE); FsShell shell = new FsShell(clientConf); String incorrectSvcId = "dummy"; String o3fsPathWithCorrectSvcId = @@ -385,8 +369,7 @@ public void testIncorrectAuthorityInURI() throws Exception { res = ToolRunner.run(shell, new String[] {"-ls", ofsPathWithIncorrectSvcId }); assertEquals(1, res); - assertTrue( - capture.getOutput().contains("Cannot resolve OM host")); + assertThat(capture.getOutput()).contains("Cannot resolve OM host"); } try (GenericTestUtils.SystemErrCapturer capture = new @@ -394,8 +377,7 @@ public void testIncorrectAuthorityInURI() throws Exception { res = ToolRunner.run(shell, new String[] {"-ls", o3fsPathWithInCorrectSvcId }); assertEquals(1, res); - assertTrue( - capture.getOutput().contains("Cannot resolve OM host")); + assertThat(capture.getOutput()).contains("Cannot resolve OM host"); } } finally { 
shell.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index d74c77d3435..ae6a24a910c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,11 +24,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.UUID; import java.util.stream.Stream; -import com.google.common.base.Strings; - import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,7 +36,6 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -56,6 +52,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test client-side CRUD snapshot operations with Ozone Manager. @@ -89,8 +87,6 @@ static void initClass() throws Exception { // Start the cluster cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(1) .build(); @@ -120,13 +116,13 @@ private static void createVolBuckKey() // Create volume and bucket int res = ToolRunner.run(shell, new String[]{"-mkdir", "-p", BUCKET_PATH}); - Assertions.assertEquals(0, res); + assertEquals(0, res); // Create key res = ToolRunner.run(shell, new String[]{"-touch", KEY_PATH}); - Assertions.assertEquals(0, res); + assertEquals(0, res); // List the bucket to make sure that bucket exists. 
res = ToolRunner.run(shell, new String[]{"-ls", BUCKET_PATH}); - Assertions.assertEquals(0, res); + assertEquals(0, res); } @@ -137,12 +133,12 @@ void testCreateSnapshotDuplicateName() throws Exception { int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); // Asserts that create request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); // Asserts that create request fails since snapshot name provided twice - Assertions.assertEquals(1, res); + assertEquals(1, res); } @Test @@ -162,19 +158,19 @@ void testCreateSnapshotWithSubDirInput() throws Exception { int res = ToolRunner.run(shell, new String[] { "-mkdir", "-p", dirPath}); - Assertions.assertEquals(0, res); + assertEquals(0, res); try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils.SystemOutCapturer()) { res = ToolRunner.run(shell, new String[] { "-createSnapshot", dirPath, snapshotName}); // Asserts that create request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); String expectedSnapshotPath = Paths.get( BUCKET_PATH, OM_SNAPSHOT_INDICATOR, snapshotName).toString(); String out = capture.getOutput().trim(); - Assertions.assertTrue(out.endsWith(expectedSnapshotPath)); + assertThat(out).endsWith(expectedSnapshotPath); } } @@ -192,7 +188,7 @@ void testCreateSnapshotSuccess(String snapshotName) int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); // Asserts that create request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); SnapshotInfo snapshotInfo = ozoneManager .getMetadataManager() @@ -202,7 +198,7 @@ void testCreateSnapshotSuccess(String snapshotName) // Assert that snapshot exists in RocksDB. // We can't use list or valid if snapshot directory exists because DB // transaction might not be flushed by the time. - Assertions.assertNotNull(snapshotInfo); + assertNotNull(snapshotInfo); } private static Stream createSnapshotFailureScenarios() { @@ -252,8 +248,7 @@ void testCreateSnapshotFailure(String description, String errorMessage = execShellCommandAndGetOutput(expectedResponse, new String[]{"-createSnapshot", paramBucketPath, snapshotName}); - Assertions.assertTrue(errorMessage - .contains(expectedMessage)); + assertThat(errorMessage).contains(expectedMessage); } /** @@ -291,7 +286,7 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { int res = ToolRunner.run(shell, new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1}); // Asserts that delete request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); // Wait for the snapshot to be marked deleted. 
GenericTestUtils.waitFor(() -> { @@ -339,24 +334,22 @@ void testDeleteBucketWithSnapshot() throws Exception { String deleteKeyOut = execShellCommandAndGetOutput(0, new String[]{"-rm", "-r", "-skipTrash", KEY_PATH}); - Assertions.assertTrue(deleteKeyOut - .contains("Deleted " + BUCKET_PATH)); + assertThat(deleteKeyOut).contains("Deleted " + BUCKET_PATH); // Delete bucket should fail due to existing snapshot String deleteBucketOut = execShellCommandAndGetOutput(1, new String[]{"-rm", "-r", "-skipTrash", BUCKET_PATH}); - Assertions.assertTrue(deleteBucketOut - .contains(BUCKET + " can't be deleted when it has snapshots")); + assertThat(deleteBucketOut).contains(BUCKET + " can't be deleted when it has snapshots"); // Key shouldn't exist under bucket String listKeyOut = execShellCommandAndGetOutput(0, new String[]{"-ls", BUCKET_PATH}); - Assertions.assertTrue(Strings.isNullOrEmpty(listKeyOut)); + assertThat(listKeyOut).isNullOrEmpty(); // Key should still exist under snapshot String listSnapKeyOut = execShellCommandAndGetOutput(0, new String[]{"-ls", snapshotPath}); - Assertions.assertTrue(listSnapKeyOut.contains(snapshotKeyPath)); + assertThat(listSnapKeyOut).contains(snapshotKeyPath); } @Test @@ -366,7 +359,7 @@ void testSnapshotDeleteSuccess() throws Exception { int res = ToolRunner.run(shell, new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName}); // Asserts that delete request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); // Wait for the snapshot to be marked deleted. GenericTestUtils.waitFor(() -> { @@ -417,8 +410,7 @@ void testSnapshotDeleteFailure(String description, String errorMessage = execShellCommandAndGetOutput(expectedResponse, new String[]{"-deleteSnapshot", paramBucketPath, snapshotName}); - Assertions.assertTrue(errorMessage - .contains(expectedMessage), errorMessage); + assertThat(errorMessage).contains(expectedMessage); } /** @@ -438,7 +430,7 @@ private String execShellCommandAndGetOutput( // Execute command int res = ToolRunner.run(shell, args); - Assertions.assertEquals(response, res); + assertEquals(response, res); // Store command output to a string, // if command should succeed then @@ -467,7 +459,7 @@ private String createSnapshot() throws Exception { int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); // Asserts that create request succeeded - Assertions.assertEquals(0, res); + assertEquals(0, res); OzoneConfiguration conf = ozoneManager.getConfiguration(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java index cd12e0d52a2..074a8e7df4b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -215,7 +216,7 @@ public void testDeleteVolumeAndBucket() throws Exception { private void checkPath(Path path) { 
    FileNotFoundException ex = assertThrows(FileNotFoundException.class, () -> fs.getFileStatus(path), "testRecursiveDelete failed");
-    assertTrue(ex.getMessage().contains("File not found"));
+    assertThat(ex.getMessage()).contains("File not found");
   }
 
   private void assertTableRowCount(Table table, int count)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java
new file mode 100644
index 00000000000..73c9fa7dc2a
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+/**
+ * Base class for Ozone filesystem contracts. It needs a {@link MiniOzoneCluster},
+ * and provides the {@link FileSystem} that's subject of the test.
+ */
+abstract class AbstractOzoneContract extends AbstractFSContract {
+
+  private final MiniOzoneCluster cluster;
+
+  /**
+   * @return root URI for the FileSystem
+   */
+  protected abstract String getRootURI() throws IOException;
+
+  protected MiniOzoneCluster getCluster() {
+    return cluster;
+  }
+
+  AbstractOzoneContract(MiniOzoneCluster cluster) {
+    super(cluster.getConf());
+    this.cluster = cluster;
+  }
+
+  @Override
+  public FileSystem getTestFileSystem() throws IOException {
+    assertNotNull(cluster, "cluster not created");
+    getConf().set("fs.defaultFS", getRootURI());
+    return FileSystem.get(getConf());
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
new file mode 100644
index 00000000000..ab1736c3b0b
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+
+import java.io.IOException;
+import java.time.Duration;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
+import static org.assertj.core.api.Assumptions.assumeThat;
+
+/**
+ * Base class for Ozone contract tests. Manages lifecycle of {@link MiniOzoneCluster}.
+ *
+ * All specific contract tests are implemented as {@link Nested} inner classes. This allows
+ * running all tests in the same cluster.
+ *
+ * Subclasses only need to implement {@link #createOzoneContract(Configuration)},
+ * but can tweak configuration by also overriding {@link #createOzoneConfig()}.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+abstract class AbstractOzoneContractTest {
+
+  private static final String CONTRACT_XML = "contract/ozone.xml";
+
+  private MiniOzoneCluster cluster;
+
+  /**
+   * This must be implemented by all subclasses.
+   * @return the FS contract
+   */
+  abstract AbstractFSContract createOzoneContract(Configuration conf);
+
+  /**
+   * Creates the base configuration for contract tests. This can be tweaked
+   * in subclasses by overriding {@link #createOzoneConfig()}.
+   */
+  protected static OzoneConfiguration createBaseConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
+
+    RatisClientConfig.RaftConfig raftClientConfig =
+        conf.getObject(RatisClientConfig.RaftConfig.class);
+    raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
+    raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
+    conf.setFromObject(raftClientConfig);
+
+    conf.addResource(CONTRACT_XML);
+
+    conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
+
+    return conf;
+  }
+
+  /**
+   * Hook method that allows tweaking the configuration.
+   */
+  OzoneConfiguration createOzoneConfig() {
+    return createBaseConfiguration();
+  }
+
+  MiniOzoneCluster getCluster() {
+    return cluster;
+  }
+
+  @BeforeAll
+  void setup() throws Exception {
+    cluster = MiniOzoneCluster.newBuilder(createOzoneConfig())
+        .setNumDatanodes(5)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterAll
+  void teardown() {
+    IOUtils.closeQuietly(cluster);
+  }
+
+  @Nested
+  class TestContractCreate extends AbstractContractCreateTest {
+    @Override
+    protected Configuration createConfiguration() {
+      return createOzoneConfig();
+    }
+
+    @Override
+    protected AbstractFSContract createContract(Configuration conf) {
+      return createOzoneContract(conf);
+    }
+  }
+
+  @Nested
+  class TestContractDistCp extends AbstractContractDistCpTest {
+    @Override
+    protected Configuration createConfiguration() {
+      return createOzoneConfig();
+    }
+
+    @Override
+    protected AbstractFSContract createContract(Configuration conf) {
+      return createOzoneContract(conf);
+    }
+
+    @Override
+    protected void deleteTestDirInTeardown() throws IOException {
+      super.deleteTestDirInTeardown();
+      cleanup("TEARDOWN", getLocalFS(), getLocalDir());
+    }
+  }
+
+  @Nested
+  class TestContractDelete extends AbstractContractDeleteTest {
+    @Override
+    protected Configuration createConfiguration() {
+      return createOzoneConfig();
+    }
+
+    @Override
+    protected AbstractFSContract createContract(Configuration conf) {
+      return createOzoneContract(conf);
+    }
+  }
+
+  @Nested
+  class TestContractGetFileStatus extends AbstractContractGetFileStatusTest {
+    @Override
+    protected Configuration createConfiguration() {
+      return createOzoneConfig();
+    }
+
+    @Override
+    protected AbstractFSContract createContract(Configuration conf) {
+      return createOzoneContract(conf);
+    }
+  }
+
+  @Nested
+  class TestContractMkdir extends AbstractContractMkdirTest {
+    @Override
+    protected Configuration createConfiguration() {
+      return createOzoneConfig();
+    }
+
+    @Override
+    protected AbstractFSContract createContract(Configuration conf) {
+      return
createOzoneContract(conf); + } + } + + @Nested + class TestContractOpen extends AbstractContractOpenTest { + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + } + + @Nested + class TestContractRename extends AbstractContractRenameTest { + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + } + + @Nested + class TestContractRootDirectory extends AbstractContractRootDirectoryTest { + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + + @Override + @Test + public void testRmRootRecursive() throws Throwable { + // OFS doesn't support creating files directly under root + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testRmRootRecursive(); + } + + @Override + @Test + public void testRmNonEmptyRootDirNonRecursive() throws Throwable { + // OFS doesn't support creating files directly under root + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testRmNonEmptyRootDirNonRecursive(); + } + + @Override + @Test + public void testRmEmptyRootDirNonRecursive() throws Throwable { + // Internally the test deletes the volume recursively, + // which is not supported + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testRmEmptyRootDirNonRecursive(); + } + + @Override + @Test + public void testListEmptyRootDirectory() throws IOException { + // Internally the test deletes the volume recursively, + // which is not supported + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testListEmptyRootDirectory(); + } + + @Override + @Test + public void testSimpleRootListing() throws IOException { + // Recursive list is not supported + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testSimpleRootListing(); + } + + @Override + @Test + public void testMkDirDepth1() throws Throwable { + // Internally the test deletes the volume recursively, + // which is not supported + assumeThat(getContract().getScheme()) + .isNotEqualTo(OZONE_OFS_URI_SCHEME); + super.testMkDirDepth1(); + } + } + + @Nested + class TestContractSeek extends AbstractContractSeekTest { + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + } + + @Nested + class TestContractUnbuffer extends AbstractContractUnbufferTest { + @Override + protected Configuration createConfiguration() { + return createOzoneConfig(); + } + + @Override + protected AbstractFSContract createContract(Configuration conf) { + return createOzoneContract(conf); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java deleted file mode 100644 index fd4e4d416f0..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java +++
/dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractCreateTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Ozone contract tests creating files. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractCreate extends AbstractContractCreateTest { - - public ITestOzoneContractCreate(boolean fso) { - // Actual init done in initParam(). - } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java deleted file mode 100644 index 8ca70f0a7f9..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Ozone contract tests covering deletes. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractDelete extends AbstractContractDeleteTest { - - public ITestOzoneContractDelete(boolean fso) { - // Actual init done in initParam(). - } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java deleted file mode 100644 index cba18fe25a6..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; - - -/** - * Contract test suite covering S3A integration with DistCp. - * Uses the block output stream, buffered to disk. This is the - * recommended output mechanism for DistCP due to its scalability. 
- */ -public class ITestOzoneContractDistCp extends AbstractContractDistCpTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected OzoneContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Override - protected void deleteTestDirInTeardown() throws IOException { - super.deleteTestDirInTeardown(); - cleanup("TEARDOWN", getLocalFS(), getLocalDir()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java deleted file mode 100644 index 333ef18f5f0..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; - - -/** - * Contract test suite covering S3A integration with DistCp. - * Uses the block output stream, buffered to disk. This is the - * recommended output mechanism for DistCP due to its scalability. - * This test suite runs the server in File System Optimized mode. - *

- * Note: It isn't possible to convert this into a parameterized test due to - * unrelated failures occurring while trying to handle directories with names - * containing '[' and ']' characters. - */ -public class ITestOzoneContractDistCpWithFSO - extends AbstractContractDistCpTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(true); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected OzoneContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Override - protected void deleteTestDirInTeardown() throws IOException { - super.deleteTestDirInTeardown(); - cleanup("TEARDOWN", getLocalFS(), getLocalDir()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java deleted file mode 100644 index a8013387cd4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ozone contract tests covering getFileStatus. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractGetFileStatus - extends AbstractContractGetFileStatusTest { - - public ITestOzoneContractGetFileStatus(boolean fso) { - // Actual init done in initParam(). 
- } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - private static final Logger LOG = - LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class); - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Override - public void teardown() throws Exception { - LOG.info("FS details {}", getFileSystem()); - super.teardown(); - } - - @Override - protected Configuration createConfiguration() { - return super.createConfiguration(); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java deleted file mode 100644 index 49118a0595a..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Test dir operations on Ozone. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractMkdir extends AbstractContractMkdirTest { - - public ITestOzoneContractMkdir(boolean fso) { - // Actual init done in initParam(). 
- } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java deleted file mode 100644 index 05babc015af..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractOpenTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Ozone contract tests opening files. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractOpen extends AbstractContractOpenTest { - - public ITestOzoneContractOpen(boolean fso) { - // Actual init done in initParam(). 
- } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java deleted file mode 100644 index fe5c112a109..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRenameTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Ozone contract tests covering rename. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractRename extends AbstractContractRenameTest { - - public ITestOzoneContractRename(boolean fso) { - // Actual init done in initParam(). 
- } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java deleted file mode 100644 index f4ec389229e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; -import java.util.Collection; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -/** - * Ozone contract test for ROOT directory operations. - */ -@RunWith(Parameterized.class) -public class ITestOzoneContractRootDir extends - AbstractContractRootDirectoryTest { - - public ITestOzoneContractRootDir(boolean fso) { - // Actual init done in initParam(). 
- } - - @Parameterized.BeforeParam - public static void initParam(boolean fso) throws IOException { - OzoneContract.createCluster(fso); - } - - @Parameterized.AfterParam - public static void teardownParam() throws IOException { - OzoneContract.destroyCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Parameterized.Parameters - public static Collection data() { - return OzoneContract.getFsoCombinations(); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index f13ca6cda34..e889c4f216d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -19,68 +19,24 @@ package org.apache.hadoop.fs.ozone.contract; import java.io.IOException; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.om.OMConfigKeys; - import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; -import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; /** - * The contract of Ozone: only enabled if the test bucket is provided. + * Filesystem contract for O3FS. */ -class OzoneContract extends AbstractFSContract { - - private static final List FSO_COMBINATIONS = Arrays.asList( - // FSO configuration is a cluster level server side configuration. - // If the cluster is configured with SIMPLE metadata layout, - // non-FSO bucket will created. - // If the cluster is configured with PREFIX metadata layout, - // FSO bucket will be created. - // Presently, OzoneClient checks bucketMetadata then invokes FSO or - // non-FSO specific code and it makes no sense to add client side - // configs now. Once the specific client API to set FSO or non-FSO - // bucket is provided the contract test can be refactored to include - // another parameter (fsoClient) which sets/unsets the client side - // configs. 
- true, // Server is configured with new layout (PREFIX) - // and new buckets will be operated on - false // Server is configured with old layout (SIMPLE) - // and old buckets will be operated on - ); - private static MiniOzoneCluster cluster; - private static final String CONTRACT_XML = "contract/ozone.xml"; - - private static boolean fsOptimizedServer; - private static OzoneClient client; +final class OzoneContract extends AbstractOzoneContract { - OzoneContract(Configuration conf) { - super(conf); - //insert the base features - addConfResource(CONTRACT_XML); - } - - static List getFsoCombinations() { - return FSO_COMBINATIONS; + OzoneContract(MiniOzoneCluster cluster) { + super(cluster); } @Override @@ -93,73 +49,12 @@ public Path getTestPath() { return new Path("/test"); } - public static void initOzoneConfiguration(boolean fsoServer) { - fsOptimizedServer = fsoServer; - } - - public static void createCluster(boolean fsoServer) throws IOException { - // Set the flag to enable/disable FSO on server. - initOzoneConfiguration(fsoServer); - createCluster(); - } - - public static void createCluster() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeRatisServerConfig ratisServerConfig = - conf.getObject(DatanodeRatisServerConfig.class); - ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); - ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); - conf.setFromObject(ratisServerConfig); - - RatisClientConfig.RaftConfig raftClientConfig = - conf.getObject(RatisClientConfig.RaftConfig.class); - raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); - conf.setFromObject(raftClientConfig); - - conf.addResource(CONTRACT_XML); - - BucketLayout bucketLayout = fsOptimizedServer - ? 
BucketLayout.FILE_SYSTEM_OPTIMIZED : BucketLayout.LEGACY; - conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); - conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); - - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); - try { - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, - 180000); - client = cluster.newClient(); - } catch (Exception e) { - throw new IOException(e); - } - } - - private void copyClusterConfigs(String configKey) { - getConf().set(configKey, cluster.getConf().get(configKey)); - } - @Override - public FileSystem getTestFileSystem() throws IOException { - //assumes cluster is not null - assertNotNull(client); - - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client); - - String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); - getConf().set("fs.defaultFS", uri); - copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); - copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - copyClusterConfigs(OZONE_FS_HSYNC_ENABLED); - return FileSystem.get(getConf()); - } - - public static void destroyCluster() throws IOException { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - cluster = null; + protected String getRootURI() throws IOException { + try (OzoneClient client = getCluster().newClient()) { + BucketLayout layout = getConf().getEnum(OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.DEFAULT); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, layout); + return String.format("%s://%s.%s/", getScheme(), bucket.getName(), bucket.getVolumeName()); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java similarity index 56% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java index 51a35ee7e3a..d617ca9de73 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java @@ -18,32 +18,31 @@ package org.apache.hadoop.fs.ozone.contract; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractSeekTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; /** - * Ozone contract tests covering file seek. + * Filesystem contract for OFS. 
*/ -public class ITestOzoneContractSeek extends AbstractContractSeekTest { +final class RootedOzoneContract extends AbstractOzoneContract { + + RootedOzoneContract(MiniOzoneCluster cluster) { + super(cluster); + } - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); + @Override + public String getScheme() { + return OzoneConsts.OZONE_OFS_URI_SCHEME; } - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); + @Override + public Path getTestPath() { + return new Path("/testvol1/testbucket1/test"); } @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); + protected String getRootURI() { + return String.format("%s://localhost", getScheme()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java similarity index 60% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java index 1af6b87b8fd..b45e68d85eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java @@ -18,30 +18,26 @@ package org.apache.hadoop.fs.ozone.contract; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.IOException; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; /** - * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}. + * Tests O3FS with FSO bucket. 
*/ -public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest { +class TestOzoneContractFSO extends AbstractOzoneContractTest { - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); + @Override + OzoneConfiguration createOzoneConfig() { + OzoneConfiguration conf = createBaseConfiguration(); + conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, FILE_SYSTEM_OPTIMIZED.name()); + return conf; } @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); + AbstractFSContract createOzoneContract(Configuration conf) { + return new OzoneContract(getCluster()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java similarity index 57% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java index e081e8d5b80..97ced88fcde 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java @@ -15,34 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.fs.ozone.contract.rooted; +package org.apache.hadoop.fs.ozone.contract; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.IOException; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; /** - * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}. + * Tests O3FS with LEGACY bucket. 
*/ -public class ITestRootedOzoneContractUnbuffer - extends AbstractContractUnbufferTest { +class TestOzoneContractLegacy extends AbstractOzoneContractTest { - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); + @Override + OzoneConfiguration createOzoneConfig() { + OzoneConfiguration conf = createBaseConfiguration(); + conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, LEGACY.name()); + return conf; } @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); + AbstractFSContract createOzoneContract(Configuration conf) { + return new OzoneContract(getCluster()); } } diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java similarity index 70% rename from hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java index 22840bd7a30..ab738f2f664 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java @@ -15,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.ozone.test; +package org.apache.hadoop.fs.ozone.contract; -import org.junit.rules.Timeout; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.contract.AbstractFSContract; /** - * Disables the given JUnit4 timeout rule if JUnit5-specific timeout-mode is set - * to "disabled". + * Tests OFS. */ -public class JUnit5AwareTimeout extends DisableOnProperty { - - public JUnit5AwareTimeout(Timeout delegate) { - super(delegate, "junit.jupiter.execution.timeout.mode", "disabled"); +class TestRootedOzoneContract extends AbstractOzoneContractTest { + @Override + AbstractFSContract createOzoneContract(Configuration conf) { + return new RootedOzoneContract(getCluster()); } - } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java deleted file mode 100644 index dd1312f3eb0..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractCreateTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests creating files. - */ -public class ITestRootedOzoneContractCreate extends AbstractContractCreateTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java deleted file mode 100644 index 12971a3e2d6..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests covering deletes. 
- */ -public class ITestRootedOzoneContractDelete extends AbstractContractDeleteTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java deleted file mode 100644 index 026f63c7795..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; - - -/** - * Contract test suite covering S3A integration with DistCp. - * Uses the block output stream, buffered to disk. This is the - * recommended output mechanism for DistCP due to its scalability. - */ -public class ITestRootedOzoneContractDistCp extends AbstractContractDistCpTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected RootedOzoneContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } - - @Override - protected void deleteTestDirInTeardown() throws IOException { - super.deleteTestDirInTeardown(); - cleanup("TEARDOWN", getLocalFS(), getLocalDir()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java deleted file mode 100644 index c858b35ac74..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests covering getFileStatus. - */ -public class ITestRootedOzoneContractGetFileStatus - extends AbstractContractGetFileStatusTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } - - @Override - protected Configuration createConfiguration() { - return super.createConfiguration(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java deleted file mode 100644 index 680754f8406..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Test dir operations on Ozone. 
- */ -public class ITestRootedOzoneContractMkdir extends AbstractContractMkdirTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java deleted file mode 100644 index 6c98cc5284b..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractOpenTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests opening files. - */ -public class ITestRootedOzoneContractOpen extends AbstractContractOpenTest { - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java deleted file mode 100644 index 56134053ead..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRenameTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests covering rename. - */ -public class ITestRootedOzoneContractRename extends AbstractContractRenameTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java deleted file mode 100644 index f4e27df2cdb..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract test for ROOT directory operations. 
- */ -public class ITestRootedOzoneContractRootDir extends - AbstractContractRootDirectoryTest { - - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } - - @Override - public void testRmRootRecursive() { - // OFS doesn't support creating files directly under root - } - - @Override - public void testRmNonEmptyRootDirNonRecursive() { - // OFS doesn't support creating files directly under root - } - - @Override - public void testRmEmptyRootDirNonRecursive() { - // Internally test deletes volume recursively - // Which is not supported - } - - @Override - public void testListEmptyRootDirectory() { - // Internally test deletes volume recursively - // Which is not supported - } - - @Override - public void testSimpleRootListing() { - // Recursive list is not supported - } - - @Override - public void testMkDirDepth1() { - // Internally test deletes volume recursively - // Which is not supported - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java deleted file mode 100644 index ec456fe1a72..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractSeekTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -/** - * Ozone contract tests covering file seek. 
- */ -public class ITestRootedOzoneContractSeek extends AbstractContractSeekTest { - @BeforeClass - public static void createCluster() throws IOException { - RootedOzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() { - RootedOzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new RootedOzoneContract(conf); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java deleted file mode 100644 index 09a05bfb337..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract.rooted; - -import java.io.IOException; -import java.time.Duration; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; -import static org.junit.jupiter.api.Assertions.assertNotNull; - -/** - * The contract of Rooted Ozone FileSystem (OFS). 
- */ -class RootedOzoneContract extends AbstractFSContract { - - private static MiniOzoneCluster cluster; - private static final String CONTRACT_XML = "contract/ozone.xml"; - - RootedOzoneContract(Configuration conf) { - super(conf); - // insert the base features - addConfResource(CONTRACT_XML); - } - - @Override - public String getScheme() { - return OzoneConsts.OZONE_OFS_URI_SCHEME; - } - - @Override - public Path getTestPath() { - return new Path("/testvol1/testbucket1/test"); - } - - public static void createCluster() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeRatisServerConfig ratisServerConfig = - conf.getObject(DatanodeRatisServerConfig.class); - ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); - ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); - conf.setFromObject(ratisServerConfig); - - RatisClientConfig.RaftConfig raftClientConfig = - conf.getObject(RatisClientConfig.RaftConfig.class); - raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); - conf.setFromObject(raftClientConfig); - conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); - - conf.addResource(CONTRACT_XML); - - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); - try { - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, - 180000); - } catch (Exception e) { - throw new IOException(e); - } - } - - private void copyClusterConfigs(String configKey) { - getConf().set(configKey, cluster.getConf().get(configKey)); - } - - @Override - public FileSystem getTestFileSystem() throws IOException { - //assumes cluster is not null - assertNotNull(cluster); - - String uri = String.format("%s://localhost:%s/", - OzoneConsts.OZONE_OFS_URI_SCHEME, - cluster.getOzoneManager().getRpcPort()); - getConf().set("fs.defaultFS", uri); - // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it - copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); - copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - copyClusterConfigs(OZONE_FS_HSYNC_ENABLED); - return FileSystem.get(getConf()); - } - - public static void destroyCluster() { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java index ff7ebd3b735..2b64d397eae 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
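The RootedOzoneContract removed above tunes Ratis timeouts through Ozone's typed configuration objects instead of raw string keys. For reference, a minimal sketch of that getObject/setFromObject round trip, limited to calls that appear in the deleted code; the wrapping class and method name are illustrative only:

import java.time.Duration;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

class TypedConfigSketch {
  static OzoneConfiguration tuneRatisTimeouts() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Typed-config pattern from the deleted RootedOzoneContract.createCluster():
    // read the typed view, mutate it, then write it back into the Configuration.
    DatanodeRatisServerConfig ratisServerConfig =
        conf.getObject(DatanodeRatisServerConfig.class);
    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
    conf.setFromObject(ratisServerConfig);
    return conf;
  }
}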
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -23,7 +23,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java index 11f4bf5c133..ac33bd2fdc2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java index 81597193a6e..43df6bf051d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.commons.lang3.RandomStringUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java index 65a03baaef6..30c4e4cd5b4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -24,13 +24,8 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java similarity index 93% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java index c9324fab28c..688d13ad361 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.container.balancer.IllegalContainerBalancerStateException; import org.apache.hadoop.hdds.scm.container.balancer.InvalidContainerBalancerConfigurationException; import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; @@ -49,16 +48,15 @@ import java.io.IOException; import java.util.Map; -import java.util.UUID; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto; import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests failover with SCM HA setup. 
@@ -66,8 +64,6 @@ public class TestFailoverWithSCMHA { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private String scmServiceId; private int numOfOMs = 1; @@ -83,15 +79,13 @@ public class TestFailoverWithSCMHA { @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId).setScmId(scmId).setOMServiceId(omServiceId) + .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3) .build(); @@ -133,8 +127,8 @@ public void testFailover() throws Exception { .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, conf); scmBlockLocationProtocol.getScmInfo(); - assertTrue(logCapture.getOutput() - .contains("Performing failover to suggested leader")); + assertThat(logCapture.getOutput()) + .contains("Performing failover to suggested leader"); scm = getLeader(cluster); SCMContainerLocationFailoverProxyProvider proxyProvider = new SCMContainerLocationFailoverProxyProvider(conf, null); @@ -150,8 +144,8 @@ public void testFailover() throws Exception { scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, "ozone"); - assertTrue(logCapture.getOutput() - .contains("Performing failover to suggested leader")); + assertThat(logCapture.getOutput()) + .contains("Performing failover to suggested leader"); } @Test @@ -192,8 +186,8 @@ public void testMoveFailover() throws Exception { .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, conf); scmBlockLocationProtocol.getScmInfo(); - assertTrue(logCapture.getOutput() - .contains("Performing failover to suggested leader")); + assertThat(logCapture.getOutput()) + .contains("Performing failover to suggested leader"); scm = getLeader(cluster); assertNotNull(scm); @@ -201,7 +195,7 @@ public void testMoveFailover() throws Exception { //get the same inflightMove Map inflightMove = scm.getReplicationManager().getMoveScheduler().getInflightMove(); - assertTrue(inflightMove.containsKey(id)); + assertThat(inflightMove).containsKey(id); MoveDataNodePair mp = inflightMove.get(id); assertEquals(dn2, mp.getTgt()); assertEquals(dn1, mp.getSrc()); @@ -225,8 +219,8 @@ public void testMoveFailover() throws Exception { scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, "ozone"); - assertTrue(logCapture.getOutput() - .contains("Performing failover to suggested leader")); + assertThat(logCapture.getOutput()) + .contains("Performing failover to suggested leader"); //switch to the new leader successfully, new leader should //get the same inflightMove , which should not contains @@ -235,7 +229,7 @@ public void testMoveFailover() throws Exception { assertNotNull(scm); inflightMove = scm.getReplicationManager() .getMoveScheduler().getInflightMove(); - assertFalse(inflightMove.containsKey(id)); + assertThat(inflightMove).doesNotContainKey(id); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java 
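The assertion hunks above (and in several files that follow) swap JUnit's assertTrue(...contains(...)) for AssertJ's fluent assertThat. A minimal before/after sketch of that style, assuming assertj-core and junit-jupiter are on the test classpath; the class and method names are placeholders:

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

class AssertjStyleSketch {
  void oldStyle(String logOutput) {
    // Fails with only a generic "expected true" message if the substring is missing.
    assertTrue(logOutput.contains("Performing failover to suggested leader"));
  }

  void newStyle(String logOutput) {
    // AssertJ reports the actual string on failure, a common reason for this migration.
    assertThat(logOutput).contains("Performing failover to suggested leader");
  }
}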
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java similarity index 91% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java index 3f59f8c601f..43fc45efd09 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -23,19 +23,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers. - ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers. - StorageContainerException; -import org.apache.hadoop.hdds.scm.PlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms. - SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.protocolPB. - StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -54,9 +46,9 @@ import java.util.concurrent.atomic.AtomicReference; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test Container calls. 
@@ -152,7 +144,7 @@ public void testGetCommittedBlockLengthForInvalidBlock() throws Exception { Throwable t = assertThrows(StorageContainerException.class, () -> ContainerProtocolCalls.getCommittedBlockLength(client, blockID, null)); - assertTrue(t.getMessage().contains("Unable to find the block")); + assertThat(t.getMessage()).contains("Unable to find the block"); xceiverClientManager.releaseClient(client, false); } @@ -183,8 +175,8 @@ public void tesPutKeyResposne() throws Exception { .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); response = client.sendCommand(putKeyRequest).getPutBlock(); assertEquals(response.getCommittedBlockLength().getBlockLength(), data.length); - assertTrue(response.getCommittedBlockLength().getBlockID() - .getBlockCommitSequenceId() > 0); + assertThat(response.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId()) + .isGreaterThan(0); BlockID responseBlockID = BlockID .getFromProtobuf(response.getCommittedBlockLength().getBlockID()); blockID diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java index e5053b3ec78..6f0bd40dde0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java @@ -33,7 +33,7 @@ import org.apache.ozone.test.GenericTestUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import org.apache.ratis.protocol.ClientId; @@ -81,7 +81,7 @@ public void testLeaderIdUsedOnFirstCall() throws Exception { List pipelines = cluster.getStorageContainerManager() .getPipelineManager().getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.THREE)); - assertFalse(pipelines.isEmpty()); + assertThat(pipelines).isNotEmpty(); Optional optional = pipelines.stream() .filter(Pipeline::isHealthy) .findFirst(); @@ -109,10 +109,8 @@ public void testLeaderIdUsedOnFirstCall() throws Exception { ContainerProtocolCalls.createContainer(xceiverClientRatis, 1L, null); } logCapturer.stopCapturing(); - assertFalse( - logCapturer.getOutput().contains( - "org.apache.ratis.protocol.NotLeaderException"), - "Client should connect to pipeline leader on first try."); + assertThat(logCapturer.getOutput()) + .doesNotContain("org.apache.ratis.protocol.NotLeaderException"); } @Test @Timeout(unit = TimeUnit.MILLISECONDS, value = 120000) @@ -120,7 +118,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception { List pipelines = cluster.getStorageContainerManager() .getPipelineManager().getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.THREE)); - assertFalse(pipelines.isEmpty()); + assertThat(pipelines).isNotEmpty(); Optional optional = pipelines.stream() .filter(Pipeline::isHealthy) .findFirst(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java similarity index 98% rename from 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java index d980d761de6..90f8375f829 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -24,9 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementMetrics; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.utils.IOUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java index 5d4f9f013d1..6b5b1aedcda 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java @@ -34,7 +34,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.UUID; import java.util.stream.Stream; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -59,15 +58,15 @@ import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.params.provider.Arguments.arguments; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -81,9 +80,6 @@ public class TestSCMDbCheckpointServlet { private StorageContainerManager scm; private SCMMetrics scmMetrics; private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; private HttpServletRequest requestMock; private HttpServletResponse responseMock; private String method; @@ -100,14 +96,8 @@ public class TestSCMDbCheckpointServlet { @BeforeEach public void init() throws Exception { 
conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_ACL_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); @@ -203,15 +193,13 @@ public void write(int b) throws IOException { doEndpoint(); - assertTrue(outputPath.toFile().length() > 0); - assertTrue( - scmMetrics.getDBCheckpointMetrics(). - getLastCheckpointCreationTimeTaken() > 0); - assertTrue( - scmMetrics.getDBCheckpointMetrics(). - getLastCheckpointStreamingTimeTaken() > 0); - assertTrue(scmMetrics.getDBCheckpointMetrics(). - getNumCheckpoints() > initialCheckpointCount); + assertThat(outputPath.toFile().length()).isGreaterThan(0); + assertThat(scmMetrics.getDBCheckpointMetrics().getLastCheckpointCreationTimeTaken()) + .isGreaterThan(0); + assertThat(scmMetrics.getDBCheckpointMetrics().getLastCheckpointStreamingTimeTaken()) + .isGreaterThan(0); + assertThat(scmMetrics.getDBCheckpointMetrics().getNumCheckpoints()) + .isGreaterThan(initialCheckpointCount); verify(scmDbCheckpointServletMock).writeDbDataToStream(any(), any(), any(), eq(toExcludeList), any(), any()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index e63c0658de3..0aa2599637a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
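These hunks drop the explicit clusterId/scmId/omId wiring from the MiniOzoneCluster builder, so the identifiers are no longer threaded through every test. A minimal sketch of the simplified setup, restricted to builder calls that appear elsewhere in this diff; the wrapping class is illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;

class ClusterSetupSketch {
  static MiniOzoneCluster startCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // No setClusterId/setScmId/setOmId calls; this diff removes them from the builders.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();
    return cluster;
  }
}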
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import java.io.File; import java.io.IOException; @@ -22,14 +22,12 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.UUID; import java.util.concurrent.TimeoutException; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl; import org.apache.hadoop.hdds.scm.ha.SCMStateMachine; @@ -48,6 +46,7 @@ import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -71,8 +70,6 @@ public class TestSCMInstallSnapshotWithHA { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private String scmServiceId; private int numOfOMs = 1; @@ -89,8 +86,6 @@ public class TestSCMInstallSnapshotWithHA { @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; @@ -100,8 +95,6 @@ public void init() throws Exception { SNAPSHOT_THRESHOLD); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -150,7 +143,7 @@ public void testInstallSnapshot() throws Exception { }, 100, 3000); long followerLastAppliedIndex = followerSM.getLastAppliedTermIndex().getIndex(); - assertTrue(followerLastAppliedIndex >= 200); + assertThat(followerLastAppliedIndex).isGreaterThanOrEqualTo(200); assertFalse(followerSM.getLifeCycleState().isPausingOrPaused()); // Verify that the follower 's DB contains the transactions which were @@ -207,7 +200,7 @@ public void testInstallOldCheckpointFailure() throws Exception { } String errorMsg = "Reloading old state of SCM"; - assertTrue(logCapture.getOutput().contains(errorMsg)); + assertThat(logCapture.getOutput()).contains(errorMsg); assertNull(newTermIndex, " installed checkpoint even though checkpoint " + "logIndex is less than it's lastAppliedIndex"); assertEquals(followerTermIndex, followerSM.getLastAppliedTermIndex()); @@ -272,8 +265,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { scmhaManager.installCheckpoint(leaderCheckpointLocation, leaderCheckpointTrxnInfo); - assertTrue(logCapture.getOutput() - .contains("Failed to reload SCM state and instantiate services.")); + assertThat(logCapture.getOutput()) + .contains("Failed to reload SCM state and instantiate services."); final LifeCycle.State s = followerSM.getLifeCycleState(); assertTrue(s == LifeCycle.State.NEW || s.isPausingOrPaused(), "Unexpected lifeCycle state: " + s); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java index 94019ed1d62..598a65fb48f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,9 +47,10 @@ import javax.management.openmbean.CompositeData; import javax.management.openmbean.TabularData; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * @@ -183,16 +184,15 @@ private void verifyEquals(TabularData actualData, assertNotNull(expectedData); for (Object obj : actualData.values()) { // Each TabularData is a set of CompositeData - assertTrue(obj instanceof CompositeData); - CompositeData cds = (CompositeData) obj; + CompositeData cds = assertInstanceOf(CompositeData.class, obj); assertEquals(2, cds.values().size()); Iterator it = cds.values().iterator(); String key = it.next().toString(); String value = it.next().toString(); int num = Integer.parseInt(value); - assertTrue(expectedData.containsKey(key)); + assertThat(expectedData).containsKey(key); assertEquals(expectedData.remove(key).intValue(), num); } - assertTrue(expectedData.isEmpty()); + assertThat(expectedData).isEmpty(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java similarity index 93% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java index 2d43b625c44..dcc9b3e8e37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,9 +40,10 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Class which tests the SCMNodeManagerInfo Bean. 
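Both verifyEquals hunks replace an instanceof assertion plus an explicit cast with JUnit 5's assertInstanceOf, which checks the type and returns the already-cast value in one step. A small illustrative sketch; the class, method, and use of String are placeholders for the CompositeData handling in the real tests:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;

class AssertInstanceOfSketch {
  void check(Object value) {
    // Replaces: assertTrue(value instanceof String); String s = (String) value;
    String s = assertInstanceOf(String.class, value);
    assertEquals(3, s.length());
  }
}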
@@ -128,17 +129,16 @@ private void verifyEquals(TabularData actualData, Map assertNotNull(actualData); assertNotNull(expectedData); for (Object obj : actualData.values()) { - assertTrue(obj instanceof CompositeData); - CompositeData cds = (CompositeData) obj; + CompositeData cds = assertInstanceOf(CompositeData.class, obj); assertEquals(2, cds.values().size()); Iterator it = cds.values().iterator(); String key = it.next().toString(); String value = it.next().toString(); long num = Long.parseLong(value); - assertTrue(expectedData.containsKey(key)); + assertThat(expectedData).containsKey(key); assertEquals(expectedData.remove(key).longValue(), num); } - assertTrue(expectedData.isEmpty()); + assertThat(expectedData).isEmpty(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java index 5fd91fa46f2..0375d83baaf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java @@ -29,12 +29,10 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import java.util.UUID; - import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests snapshots in SCM HA. @@ -52,7 +50,6 @@ public static void setup() throws Exception { cluster = MiniOzoneCluster .newBuilder(conf) .setNumDatanodes(3) - .setScmId(UUID.randomUUID().toString()) .build(); cluster.waitForClusterToBeReady(); } @@ -76,14 +73,12 @@ public void testSnapshot() throws Exception { long snapshotInfo2 = scm.getScmHAManager().asSCMHADBTransactionBuffer() .getLatestTrxInfo().getTransactionIndex(); - assertTrue(snapshotInfo2 > snapshotInfo1, - String.format("Snapshot index 2 %d should greater than Snapshot " + - "index 1 %d", snapshotInfo2, snapshotInfo1)); + assertThat(snapshotInfo2).isGreaterThan(snapshotInfo1); cluster.restartStorageContainerManager(false); TransactionInfo trxInfoAfterRestart = scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo(); - assertTrue(trxInfoAfterRestart.getTransactionIndex() >= snapshotInfo2); + assertThat(trxInfoAfterRestart.getTransactionIndex()).isGreaterThanOrEqualTo(snapshotInfo2); assertDoesNotThrow(() -> pipelineManager.getPipeline(ratisPipeline1.getId())); assertDoesNotThrow(() -> diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java index 1d4fc95dbf1..f7a3aa9c9b7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.ScmConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.ha.SCMStateMachine; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; @@ -39,6 +37,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +47,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Properties; -import java.util.UUID; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; @@ -70,6 +68,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -91,12 +90,11 @@ public final class TestSecretKeySnapshot { private MiniKdc miniKdc; private OzoneConfiguration conf; + @TempDir private File workDir; private File ozoneKeytab; private File spnegoKeytab; private String host; - private String clusterId; - private String scmId; private MiniOzoneHAClusterImpl cluster; @BeforeEach @@ -106,10 +104,6 @@ public void init() throws Exception { ExitUtils.disableSystemExit(); - workDir = GenericTestUtils.getTestDir(getClass().getSimpleName()); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); @@ -125,9 +119,7 @@ public void init() throws Exception { conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms"); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) .setSCMServiceId("TestSecretKeySnapshot") - .setScmId(scmId) .setSCMServiceId("SCMServiceId") .setNumDatanodes(1) .setNumOfStorageContainerManagers(3) @@ -239,7 +231,7 @@ public void testInstallSnapshot() throws Exception { 100, 3000); long followerLastAppliedIndex = followerSM.getLastAppliedTermIndex().getIndex(); - assertTrue(followerLastAppliedIndex >= 200); + assertThat(followerLastAppliedIndex).isGreaterThanOrEqualTo(200); assertFalse(followerSM.getLifeCycleState().isPausingOrPaused()); // Verify that the follower has the secret keys created @@ -250,8 +242,8 @@ public void testInstallSnapshot() throws Exception { List followerKeys = followerSecretKeyManager.getSortedKeys(); LOG.info("Follower secret keys after snapshot: {}", followerKeys); - assertTrue(followerKeys.size() >= 2); - assertTrue(followerKeys.contains(currentKeyInLeader)); + assertThat(followerKeys.size()).isGreaterThanOrEqualTo(2); + assertThat(followerKeys).contains(currentKeyInLeader); assertEquals(leaderSecretKeyManager.getSortedKeys(), followerKeys); // Wait for the next rotation, 
assert that the updates can be synchronized diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index 1b8c2843996..eb2442cd098 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.DefaultConfigManager; @@ -35,11 +35,12 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,11 +75,11 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Integration test to verify symmetric SecretKeys APIs in a secure cluster. 
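Both secret-key test classes replace a manually created work directory (GenericTestUtils.getTestDir) with JUnit 5's @TempDir injection. A minimal sketch of that pattern, assuming junit-jupiter; the class name, field visibility, and test body here are illustrative:

import java.io.File;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketch {
  @TempDir
  File workDir;  // created before each test and removed afterwards by JUnit

  @Test
  void writesIntoManagedDirectory() {
    // Anything written under workDir needs no manual cleanup in the test.
    File keytab = new File(workDir, "ozone.keytab");
    System.out.println(keytab.getAbsolutePath());
  }
}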
@@ -91,14 +92,13 @@ public final class TestSecretKeysApi { .getLogger(TestSecretKeysApi.class); private MiniKdc miniKdc; private OzoneConfiguration conf; + @TempDir private File workDir; private File ozoneKeytab; private File spnegoKeytab; private File testUserKeytab; private String testUserPrincipal; private String ozonePrincipal; - private String clusterId; - private String scmId; private MiniOzoneHAClusterImpl cluster; @BeforeEach @@ -109,10 +109,6 @@ public void init() throws Exception { ExitUtils.disableSystemExit(); ExitUtil.disableSystemExit(); - workDir = GenericTestUtils.getTestDir(getClass().getSimpleName()); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); @@ -309,9 +305,9 @@ private void testSecretKeyAuthorization() throws Exception { assertThrows(RemoteException.class, secretKeyProtocol::getCurrentSecretKey); assertEquals(AuthorizationException.class.getName(), ex.getClassName()); - assertTrue(ex.getMessage().contains( + assertThat(ex.getMessage()).contains( "User test@EXAMPLE.COM (auth:KERBEROS) is not authorized " + - "for protocol")); + "for protocol"); } @Test @@ -331,9 +327,7 @@ private void startCluster(int numSCMs) throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) .setSCMServiceId("TestSecretKey") - .setScmId(scmId) .setNumDatanodes(3) .setNumOfStorageContainerManagers(numSCMs) .setNumOfOzoneManagers(1); @@ -342,12 +336,12 @@ private void startCluster(int numSCMs) cluster.waitForClusterToBeReady(); } - @NotNull + @Nonnull private SecretKeyProtocol getSecretKeyProtocol() throws IOException { return getSecretKeyProtocol(ozonePrincipal, ozoneKeytab); } - @NotNull + @Nonnull private SecretKeyProtocol getSecretKeyProtocol( String user, File keyTab) throws IOException { UserGroupInformation ugi = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java similarity index 92% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index b0c73a592a7..e973c842de4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; @@ -30,16 +30,11 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.ScmConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -78,6 +73,7 @@ import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask; @@ -88,7 +84,6 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ExitUtil; @@ -139,6 +134,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -149,10 +145,10 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.argThat; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -221,19 +217,14 @@ private void 
testRpcPermission(MiniOzoneCluster cluster, cluster.getStorageContainerManager().getClientProtocolServer()); mockRemoteUser(UserGroupInformation.createRemoteUser(fakeRemoteUsername)); - - try { - mockClientServer.deleteContainer( - ContainerTestHelper.getTestContainerID()); - fail("Operation should fail, expecting an IOException here."); - } catch (Exception e) { - if (expectPermissionDenied) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } else { - // If passes permission check, it should fail with - // container not exist exception. - assertInstanceOf(ContainerNotFoundException.class, e); - } + Exception ex = assertThrows(Exception.class, () -> mockClientServer.deleteContainer( + ContainerTestHelper.getTestContainerID())); + if (expectPermissionDenied) { + verifyPermissionDeniedException(ex, fakeRemoteUsername); + } else { + // If passes permission check, it should fail with + // container not exist exception. + assertInstanceOf(ContainerNotFoundException.class, ex); } try { @@ -249,18 +240,14 @@ private void testRpcPermission(MiniOzoneCluster cluster, verifyPermissionDeniedException(e, fakeRemoteUsername); } - try { - mockClientServer.getContainer( - ContainerTestHelper.getTestContainerID()); - fail("Operation should fail, expecting an IOException here."); - } catch (Exception e) { - if (expectPermissionDenied) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } else { - // If passes permission check, it should fail with - // key not exist exception. - assertInstanceOf(ContainerNotFoundException.class, e); - } + Exception e = assertThrows(Exception.class, () -> mockClientServer.getContainer( + ContainerTestHelper.getTestContainerID())); + if (expectPermissionDenied) { + verifyPermissionDeniedException(e, fakeRemoteUsername); + } else { + // If passes permission check, it should fail with + // key not exist exception. 
+ assertInstanceOf(ContainerNotFoundException.class, e); } } @@ -275,26 +262,36 @@ private void verifyPermissionDeniedException(Exception e, String userName) { public void testBlockDeletionTransactions() throws Exception { int numKeys = 5; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100, + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 100, + DatanodeConfiguration datanodeConfiguration = conf.getObject( + DatanodeConfiguration.class); + datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(100)); + conf.setFromObject(datanodeConfiguration); + ScmConfig scmConfig = conf.getObject(ScmConfig.class); + scmConfig.setBlockDeletionInterval(Duration.ofMillis(100)); + conf.setFromObject(scmConfig); + + conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY + + ".client.request.write.timeout", 30, TimeUnit.SECONDS); + conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY + + ".client.request.watch.timeout", 30, TimeUnit.SECONDS); + conf.setInt("hdds.datanode.block.delete.threads.max", 5); + conf.setInt("hdds.datanode.block.delete.queue.limit", 32); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50, + TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 3000, + conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 1, TimeUnit.SECONDS); - ScmConfig scmConfig = conf.getObject(ScmConfig.class); - scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1)); - conf.setFromObject(scmConfig); // Reset container provision size, otherwise only one container // is created by default. conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(100) + .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); @@ -313,13 +310,12 @@ public void testBlockDeletionTransactions() throws Exception { OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), cluster.getStorageContainerManager()); } - Map> containerBlocks = createDeleteTXLog( cluster.getStorageContainerManager(), delLog, keyLocations, helper); // Verify a few TX gets created in the TX log. - assertTrue(delLog.getNumOfValidTransactions() > 0); + assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); // Once TXs are written into the log, SCM starts to fetch TX // entries from the log and schedule block deletions in HB interval, @@ -336,7 +332,7 @@ public void testBlockDeletionTransactions() throws Exception { } catch (IOException e) { return false; } - }, 1000, 10000); + }, 1000, 22000); assertTrue(helper.verifyBlocksWithTxnTable(containerBlocks)); // Continue the work, add some TXs that with known container names, // but unknown block IDs. @@ -352,7 +348,7 @@ public void testBlockDeletionTransactions() throws Exception { } // Verify a few TX gets created in the TX log. - assertTrue(delLog.getNumOfValidTransactions() > 0); + assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); // These blocks cannot be found in the container, skip deleting them // eventually these TX will success. 
@@ -378,7 +374,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).setHbInterval(1000) .setHbProcessorInterval(3000).setNumDatanodes(1) - .setClusterId(UUID.randomUUID().toString()).build(); + .build(); cluster.waitForClusterToBeReady(); try { @@ -413,8 +409,8 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { GenericTestUtils.LogCapturer versionEndPointTaskLog = GenericTestUtils.LogCapturer.captureLogs(VersionEndpointTask.LOG); // Initially empty - assertTrue(scmDnHBDispatcherLog.getOutput().isEmpty()); - assertTrue(versionEndPointTaskLog.getOutput().isEmpty()); + assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty(); + assertThat(versionEndPointTaskLog.getOutput()).isEmpty(); // start the new SCM scm.start(); // Initially DatanodeStateMachine will be in Running state @@ -444,9 +440,9 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { 5000); assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, dsm.getContext().getState()); - assertTrue(versionEndPointTaskLog.getOutput().contains( + assertThat(versionEndPointTaskLog.getOutput()).contains( "org.apache.hadoop.ozone.common" + - ".InconsistentStorageStateException: Mismatched ClusterIDs")); + ".InconsistentStorageStateException: Mismatched ClusterIDs"); } finally { cluster.shutdown(); } @@ -458,7 +454,7 @@ public void testBlockDeletingThrottling() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); ScmConfig scmConfig = conf.getObject(ScmConfig.class); scmConfig.setBlockDeletionInterval(Duration.ofMillis(100)); @@ -501,23 +497,14 @@ public void testBlockDeletingThrottling() throws Exception { createDeleteTXLog(cluster.getStorageContainerManager(), delLog, keyLocations, helper); // Verify a few TX gets created in the TX log. - assertTrue(delLog.getNumOfValidTransactions() > 0); + assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); // Verify the size in delete commands is expected. 
GenericTestUtils.waitFor(() -> { NodeManager nodeManager = cluster.getStorageContainerManager() .getScmNodeManager(); - LayoutVersionManager versionManager = - nodeManager.getLayoutVersionManager(); - StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo - = StorageContainerDatanodeProtocolProtos.LayoutVersionProto - .newBuilder() - .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion()) - .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion()) - .build(); List commands = nodeManager.processHeartbeat( - nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0), - layoutInfo); + nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0)); if (commands != null) { for (SCMCommand cmd : commands) { if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) { @@ -555,7 +542,7 @@ private Map> createDeleteTXLog( for (OmKeyInfo info : keyLocations.values()) { totalCreatedBlocks += info.getKeyLocationVersions().size(); } - assertTrue(totalCreatedBlocks > 0); + assertThat(totalCreatedBlocks).isGreaterThan(0); assertEquals(totalCreatedBlocks, helper.getAllBlocks(containerNames).size()); @@ -727,12 +714,7 @@ public void testSCMReinitializationWithHAEnabled() throws Exception { final String clusterId = cluster.getStorageContainerManager().getClusterId(); // validate there is no ratis group pre existing - try { - validateRatisGroupExists(conf, clusterId); - fail(); - } catch (IOException ioe) { - // Exception is expected here - } + assertThrows(IOException.class, () -> validateRatisGroupExists(conf, clusterId)); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); // This will re-initialize SCM @@ -794,7 +776,6 @@ public void testScmInfo() throws Exception { @Test public void testScmProcessDatanodeHeartbeat() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String scmId = UUID.randomUUID().toString(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( @@ -804,7 +785,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { final int datanodeNum = 3; MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(datanodeNum) - .setScmId(scmId) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); @@ -824,7 +804,7 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { for (DatanodeDetails node : allNodes) { DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager() .getNodeByUuid(node.getUuidString()); - assertTrue(datanodeInfo.getLastHeartbeatTime() > start); + assertThat(datanodeInfo.getLastHeartbeatTime()).isGreaterThan(start); assertEquals(datanodeInfo.getUuidString(), datanodeInfo.getNetworkName()); assertEquals("/rack1", datanodeInfo.getNetworkLocation()); @@ -841,7 +821,7 @@ public void testCloseContainerCommandOnRestart() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); @@ -850,7 +830,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { MiniOzoneCluster cluster = 
MiniOzoneCluster.newBuilder(conf) .setHbInterval(1000) .setHbProcessorInterval(3000) - .setTrace(false) .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); @@ -968,7 +947,7 @@ public void testContainerReportQueueWithDrop() throws Exception { eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); - assertTrue(containerReportExecutors.droppedEvents() > 1); + assertThat(containerReportExecutors.droppedEvents()).isGreaterThan(1); Thread.currentThread().sleep(1000); assertEquals(containerReportExecutors.droppedEvents() + containerReportExecutors.scheduledEvents(), @@ -1024,8 +1003,8 @@ public void testContainerReportQueueTakingMoreTime() throws Exception { = new ContainerReportFromDatanode(dn, report); eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata2); semaphore.acquire(2); - assertTrue(containerReportExecutors.longWaitInQueueEvents() >= 1); - assertTrue(containerReportExecutors.longTimeExecutionEvents() >= 1); + assertThat(containerReportExecutors.longWaitInQueueEvents()).isGreaterThanOrEqualTo(1); + assertThat(containerReportExecutors.longTimeExecutionEvents()).isGreaterThanOrEqualTo(1); containerReportExecutors.close(); semaphore.release(2); } @@ -1080,10 +1059,7 @@ public void testNonRatisToRatis() throws IOException, AuthenticationException, InterruptedException, TimeoutException { final OzoneConfiguration conf = new OzoneConfiguration(); - final String clusterID = UUID.randomUUID().toString(); try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterID) - .setScmId(UUID.randomUUID().toString()) .setNumDatanodes(3) .build()) { final StorageContainerManager nonRatisSCM = cluster @@ -1095,7 +1071,7 @@ public void testNonRatisToRatis() DefaultConfigManager.clearDefaultConfigs(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, clusterID); + StorageContainerManager.scmInit(conf, cluster.getClusterId()); cluster.restartStorageContainerManager(false); final StorageContainerManager ratisSCM = cluster diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index a0f5bc7834e..e62820cfb1d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -16,13 +16,12 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.ha.SCMHAMetrics; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; @@ -60,6 +59,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -74,8 +74,6 @@ public class TestStorageContainerManagerHA { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private static int numOfOMs = 3; private String scmServiceId; @@ -95,13 +93,9 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL, "5s"); conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, "1"); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) @@ -175,7 +169,7 @@ void testAllSCMAreRunning() throws Exception { private void doPutKey() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); try (OzoneClient client = cluster.newClient()) { ObjectStore store = client.getObjectStore(); String value = "sample value"; @@ -195,7 +189,7 @@ private void doPutKey() throws Exception { assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); byte[] fileContent = new byte[value.getBytes(UTF_8).length]; - is.read(fileContent); + assertEquals(fileContent.length, is.read(fileContent)); assertEquals(value, new String(fileContent, UTF_8)); assertFalse(key.getCreationTime().isBefore(testStartTime)); assertFalse(key.getModificationTime().isBefore(testStartTime)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java index 23c0ef8496e..322b1e65bc6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java @@ -14,7 +14,7 @@ * License for the specific 
language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index f7835d30d5c..fb312dfb509 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -15,11 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -27,9 +27,6 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; @@ -114,7 +111,7 @@ public XceiverClientReply sendCommandAsync( @Timeout(5) public void testGetBlockRetryAlNodes() { final ArrayList allDNs = new ArrayList<>(dns); - assertTrue(allDNs.size() > 1); + assertThat(allDNs.size()).isGreaterThan(1); try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( @@ -135,7 +132,7 @@ public XceiverClientReply sendCommandAsync( @Timeout(5) public void testReadChunkRetryAllNodes() { final ArrayList allDNs = new ArrayList<>(dns); - assertTrue(allDNs.size() > 1); + assertThat(allDNs.size()).isGreaterThan(1); try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java index f66315f851d..95a0b0e17fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java @@ -15,7 +15,7 @@ * See the 
License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.cache.Cache; import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig; @@ -25,10 +25,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -45,12 +42,12 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; /** @@ -177,7 +174,7 @@ public void testFreeByReference() throws IOException { Throwable t = assertThrows(IOException.class, () -> ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null)); - assertTrue(t.getMessage().contains("This channel is not connected")); + assertThat(t.getMessage()).contains("This channel is not connected"); clientManager.releaseClient(client2, false); } @@ -228,7 +225,7 @@ public void testFreeByEviction() throws IOException { Throwable t = assertThrows(IOException.class, () -> ContainerProtocolCalls.createContainer(client1, container1.getContainerInfo().getContainerID(), null)); - assertTrue(t.getMessage().contains("This channel is not connected")); + assertThat(t.getMessage()).contains("This channel is not connected"); clientManager.releaseClient(client2, false); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java index 42c7a58ccd3..c4f62040536 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; import static org.apache.ozone.test.MetricsAsserts.assertCounter; @@ -30,19 +30,14 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientMetrics; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index 72d1ebf4381..df5281e4240 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -50,12 +50,11 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests for ContainerStateManager. @@ -214,7 +213,7 @@ public void testGetMatchingContainer() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline()); - assertTrue(info.getContainerID() > cid); + assertThat(info.getContainerID()).isGreaterThan(cid); cid = info.getContainerID(); } @@ -264,10 +263,9 @@ public void testGetMatchingContainerMultipleThreads() // TODO: #CLUTIL Look at the division of block allocations in different // containers. 
LOG.error("Total allocated block = " + matchedCount); - assertTrue(matchedCount <= - numBlockAllocates / container2MatchedCount.size() + threshold - && matchedCount >= - numBlockAllocates / container2MatchedCount.size() - threshold); + assertThat(matchedCount) + .isLessThanOrEqualTo(numBlockAllocates / container2MatchedCount.size() + threshold) + .isGreaterThanOrEqualTo(numBlockAllocates / container2MatchedCount.size() - threshold); } } @@ -375,43 +373,43 @@ public void testReplicaMap() throws Exception { containerStateManager.updateContainerReplica(id, replicaTwo); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(2, replicaSet.size()); - assertTrue(replicaSet.contains(replicaOne)); - assertTrue(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).contains(replicaOne); + assertThat(replicaSet).contains(replicaTwo); // Test 3: Remove one replica node and then test containerStateManager.removeContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(1, replicaSet.size()); - assertFalse(replicaSet.contains(replicaOne)); - assertTrue(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).doesNotContain(replicaOne); + assertThat(replicaSet).contains(replicaTwo); // Test 3: Remove second replica node and then test containerStateManager.removeContainerReplica(id, replicaTwo); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(0, replicaSet.size()); - assertFalse(replicaSet.contains(replicaOne)); - assertFalse(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).doesNotContain(replicaOne); + assertThat(replicaSet).doesNotContain(replicaTwo); // Test 4: Re-insert dn1 containerStateManager.updateContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(1, replicaSet.size()); - assertTrue(replicaSet.contains(replicaOne)); - assertFalse(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).contains(replicaOne); + assertThat(replicaSet).doesNotContain(replicaTwo); // Re-insert dn2 containerStateManager.updateContainerReplica(id, replicaTwo); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(2, replicaSet.size()); - assertTrue(replicaSet.contains(replicaOne)); - assertTrue(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).contains(replicaOne); + assertThat(replicaSet).contains(replicaTwo); // Re-insert dn1 containerStateManager.updateContainerReplica(id, replicaOne); replicaSet = containerStateManager.getContainerReplicas(id); assertEquals(2, replicaSet.size()); - assertTrue(replicaSet.contains(replicaOne)); - assertTrue(replicaSet.contains(replicaTwo)); + assertThat(replicaSet).contains(replicaOne); + assertThat(replicaSet).contains(replicaTwo); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java similarity index 88% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java index 2df0d09db53..5ebf9b56a8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -31,8 +31,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -66,6 +64,11 @@ import java.util.stream.Collectors; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.getDNHostAndPort; +import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachHealthState; +import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachOpState; +import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachPersistedOpState; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -210,7 +213,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() scmClient.decommissionNodes(Arrays.asList( getDNHostAndPort(toDecommission))); - waitForDnToReachOpState(toDecommission, DECOMMISSIONED); + waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED); // Ensure one node transitioned to DECOMMISSIONING List decomNodes = nm.getNodes( DECOMMISSIONED, @@ -226,7 +229,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() // Stop the decommissioned DN int dnIndex = cluster.getHddsDatanodeIndex(toDecommission); cluster.shutdownHddsDatanode(toDecommission); - waitForDnToReachHealthState(toDecommission, DEAD); + waitForDnToReachHealthState(nm, toDecommission, DEAD); // Now the decommissioned node is dead, we should have // 3 replicas for the tracked container. 
@@ -237,7 +240,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() cluster.restartHddsDatanode(dnIndex, true); scmClient.recommissionNodes(Arrays.asList( getDNHostAndPort(toDecommission))); - waitForDnToReachOpState(toDecommission, IN_SERVICE); + waitForDnToReachOpState(nm, toDecommission, IN_SERVICE); waitForDnToReachPersistedOpState(toDecommission, IN_SERVICE); } @@ -273,7 +276,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() // After the SCM restart, the DN should report as DECOMMISSIONING, then // it should re-enter the decommission workflow and move to DECOMMISSIONED DatanodeDetails newDn = nm.getNodeByUuid(dn.getUuid().toString()); - waitForDnToReachOpState(newDn, DECOMMISSIONED); + waitForDnToReachOpState(nm, newDn, DECOMMISSIONED); waitForDnToReachPersistedOpState(newDn, DECOMMISSIONED); // Now the node is decommissioned, so restart SCM again @@ -283,7 +286,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() // On initial registration, the DN should report its operational state // and if it is decommissioned, that should be updated in the NodeStatus - waitForDnToReachOpState(newDn, DECOMMISSIONED); + waitForDnToReachOpState(nm, newDn, DECOMMISSIONED); // Also confirm the datanodeDetails correctly reflect the operational // state. waitForDnToReachPersistedOpState(newDn, DECOMMISSIONED); @@ -292,7 +295,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() // reflect the state of in SCM, in IN_SERVICE. int dnIndex = cluster.getHddsDatanodeIndex(dn); cluster.shutdownHddsDatanode(dnIndex); - waitForDnToReachHealthState(dn, DEAD); + waitForDnToReachHealthState(nm, dn, DEAD); // Datanode is shutdown and dead. Now recommission it in SCM scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dn))); // Now restart it and ensure it remains IN_SERVICE @@ -302,8 +305,8 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() // As this is not an initial registration since SCM was started, the DN // should report its operational state and if it differs from what SCM // has, then the SCM state should be used and the DN state updated. 
- waitForDnToReachHealthState(newDn, HEALTHY); - waitForDnToReachOpState(newDn, IN_SERVICE); + waitForDnToReachHealthState(nm, newDn, HEALTHY); + waitForDnToReachOpState(nm, newDn, IN_SERVICE); waitForDnToReachPersistedOpState(newDn, IN_SERVICE); } @@ -343,7 +346,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance() scmClient.startMaintenanceNodes(Arrays.asList( getDNHostAndPort(dn)), 0); - waitForDnToReachOpState(dn, IN_MAINTENANCE); + waitForDnToReachOpState(nm, dn, IN_MAINTENANCE); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); // Should still be 3 replicas online as no replication should happen for @@ -357,7 +360,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance() // Stop the maintenance DN cluster.shutdownHddsDatanode(dn); - waitForDnToReachHealthState(dn, DEAD); + waitForDnToReachHealthState(nm, dn, DEAD); // Now the maintenance node is dead, we should still have // 3 replicas as we don't purge the replicas for a dead maintenance node @@ -369,13 +372,13 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance() // Restart the DN and it should keep the IN_MAINTENANCE state cluster.restartHddsDatanode(dn, true); DatanodeDetails newDN = nm.getNodeByUuid(dn.getUuid().toString()); - waitForDnToReachHealthState(newDN, HEALTHY); + waitForDnToReachHealthState(nm, newDN, HEALTHY); waitForDnToReachPersistedOpState(newDN, IN_MAINTENANCE); // Stop the DN and wait for it to go dead. int dnIndex = cluster.getHddsDatanodeIndex(dn); cluster.shutdownHddsDatanode(dnIndex); - waitForDnToReachHealthState(dn, DEAD); + waitForDnToReachHealthState(nm, dn, DEAD); // Datanode is shutdown and dead. Now recommission it in SCM scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dn))); @@ -387,8 +390,8 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance() // As this is not an initial registration since SCM was started, the DN // should report its operational state and if it differs from what SCM // has, then the SCM state should be used and the DN state updated. 
- waitForDnToReachHealthState(newDn, HEALTHY); - waitForDnToReachOpState(newDn, IN_SERVICE); + waitForDnToReachHealthState(nm, newDn, HEALTHY); + waitForDnToReachOpState(nm, newDn, IN_SERVICE); waitForDnToReachPersistedOpState(dn, IN_SERVICE); } @@ -411,7 +414,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() - .map(this::getDNHostAndPort) + .map(TestNodeUtil::getDNHostAndPort) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to maintenance @@ -422,7 +425,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() // There should now be 5-6 replicas of the container we are tracking Set newReplicas = cm.getContainerReplicas(container.containerID()); - assertTrue(newReplicas.size() >= 5); + assertThat(newReplicas.size()).isGreaterThanOrEqualTo(5); scmClient.recommissionNodes(forMaintenance.stream() .map(d -> getDNHostAndPort(d)) @@ -430,7 +433,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() // Ensure all 3 DNs go to maintenance for (DatanodeDetails dn : forMaintenance) { - waitForDnToReachOpState(dn, IN_SERVICE); + waitForDnToReachOpState(nm, dn, IN_SERVICE); } waitForContainerReplicas(container, 3); @@ -445,18 +448,18 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() .limit(2) .collect(Collectors.toList()); scmClient.startMaintenanceNodes(ecMaintenance.stream() - .map(this::getDNHostAndPort) + .map(TestNodeUtil::getDNHostAndPort) .collect(Collectors.toList()), 0); for (DatanodeDetails dn : ecMaintenance) { waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); } - assertTrue(cm.getContainerReplicas(ecContainer.containerID()).size() >= 6); + assertThat(cm.getContainerReplicas(ecContainer.containerID()).size()).isGreaterThanOrEqualTo(6); scmClient.recommissionNodes(ecMaintenance.stream() - .map(this::getDNHostAndPort) + .map(TestNodeUtil::getDNHostAndPort) .collect(Collectors.toList())); // Ensure the 2 DNs go to IN_SERVICE for (DatanodeDetails dn : ecMaintenance) { - waitForDnToReachOpState(dn, IN_SERVICE); + waitForDnToReachOpState(nm, dn, IN_SERVICE); } waitForContainerReplicas(ecContainer, 5); } @@ -479,7 +482,7 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart() replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() - .map(this::getDNHostAndPort) + .map(TestNodeUtil::getDNHostAndPort) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to entering_maintenance @@ -496,13 +499,13 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart() // Ensure all 3 DNs go to maintenance for (DatanodeDetails dn : newDns) { - waitForDnToReachOpState(dn, IN_MAINTENANCE); + waitForDnToReachOpState(nm, dn, IN_MAINTENANCE); } // There should now be 5-6 replicas of the container we are tracking Set newReplicas = cm.getContainerReplicas(container.containerID()); - assertTrue(newReplicas.size() >= 5); + assertThat(newReplicas.size()).isGreaterThanOrEqualTo(5); } @Test @@ -526,7 +529,7 @@ public void testMaintenanceEndsAutomaticallyAtTimeout() // decommission interface only allows us to specify hours from now as the // end time, that is not really suitable for a test like this. 
nm.setNodeOperationalState(dn, IN_MAINTENANCE, newEndTime); - waitForDnToReachOpState(dn, IN_SERVICE); + waitForDnToReachOpState(nm, dn, IN_SERVICE); waitForDnToReachPersistedOpState(dn, IN_SERVICE); // Put the node back into maintenance and then stop it and wait for it to @@ -534,11 +537,11 @@ public void testMaintenanceEndsAutomaticallyAtTimeout() scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0); waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); cluster.shutdownHddsDatanode(dn); - waitForDnToReachHealthState(dn, DEAD); + waitForDnToReachHealthState(nm, dn, DEAD); newEndTime = System.currentTimeMillis() / 1000 + 5; nm.setNodeOperationalState(dn, IN_MAINTENANCE, newEndTime); - waitForDnToReachOpState(dn, IN_SERVICE); + waitForDnToReachOpState(nm, dn, IN_SERVICE); // Ensure there are 3 replicas not including the dead node, indicating a new // replica was created GenericTestUtils.waitFor(() -> getContainerReplicas(container) @@ -585,7 +588,7 @@ public void testSCMHandlesRestartForMaintenanceNode() // Now let the node go dead and repeat the test. This time ensure a new // replica is created. cluster.shutdownHddsDatanode(dn); - waitForDnToReachHealthState(dn, DEAD); + waitForDnToReachHealthState(nm, dn, DEAD); cluster.restartStorageContainerManager(false); setManagers(); @@ -632,18 +635,6 @@ private void generateData(int keyCount, String keyPrefix, } } - /** - * Retrieves the NodeStatus for the given DN or fails the test if the - * Node cannot be found. This is a helper method to allow the nodeStatus to be - * checked in lambda expressions. - * @param dn Datanode for which to retrieve the NodeStatus. - * @return - */ - private NodeStatus getNodeStatus(DatanodeDetails dn) { - return assertDoesNotThrow(() -> nm.getNodeStatus(dn), - "Unexpected exception getting the nodeState"); - } - /** * Retrieves the containerReplica set for a given container or fails the test * if the container cannot be found. This is a helper method to allow the @@ -669,61 +660,6 @@ private DatanodeDetails getOneDNHostingReplica( return c.getDatanodeDetails(); } - /** - * Given a Datanode, return a string consisting of the hostname and one of its - * ports in the for host:post. - * @param dn Datanode for which to retrieve the host:post string - * @return host:port for the given DN. - */ - private String getDNHostAndPort(DatanodeDetails dn) { - return dn.getHostName() + ":" + dn.getPorts().get(0).getValue(); - } - - /** - * Wait for the given datanode to reach the given operational state. - * @param dn Datanode for which to check the state - * @param state The state to wait for. - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForDnToReachOpState(DatanodeDetails dn, - HddsProtos.NodeOperationalState state) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor( - () -> getNodeStatus(dn).getOperationalState().equals(state), - 200, 30000); - } - - /** - * Wait for the given datanode to reach the given Health state. - * @param dn Datanode for which to check the state - * @param state The state to wait for. - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForDnToReachHealthState(DatanodeDetails dn, - HddsProtos.NodeState state) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor( - () -> getNodeStatus(dn).getHealth().equals(state), - 200, 30000); - } - - /** - * Wait for the given datanode to reach the given persisted state. 
- * @param dn Datanode for which to check the state - * @param state The state to wait for. - * @throws TimeoutException - * @throws InterruptedException - */ - private void waitForDnToReachPersistedOpState(DatanodeDetails dn, - HddsProtos.NodeOperationalState state) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor( - () -> dn.getPersistedOpState().equals(state), - 200, 30000); - } - /** * Get any container present in the cluster and wait to ensure 3 replicas * have been reported before returning the container. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java new file mode 100644 index 00000000000..1cb5ef792f3 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Assertions; + +import java.util.concurrent.TimeoutException; + +/** + * Utility class with helper methods for testing node state and status. + */ +public final class TestNodeUtil { + + private TestNodeUtil() { + } + + /** + * Wait for the given datanode to reach the given operational state. + * @param dn Datanode for which to check the state + * @param state The state to wait for. + * @throws TimeoutException + * @throws InterruptedException + */ + public static void waitForDnToReachOpState(NodeManager nodeManager, + DatanodeDetails dn, HddsProtos.NodeOperationalState state) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor( + () -> getNodeStatus(nodeManager, dn) + .getOperationalState().equals(state), + 200, 30000); + } + + /** + * Wait for the given datanode to reach the given Health state. + * @param dn Datanode for which to check the state + * @param state The state to wait for. + * @throws TimeoutException + * @throws InterruptedException + */ + public static void waitForDnToReachHealthState(NodeManager nodeManager, + DatanodeDetails dn, HddsProtos.NodeState state) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor( + () -> getNodeStatus(nodeManager, dn).getHealth().equals(state), + 200, 30000); + } + + /** + * Retrieves the NodeStatus for the given DN or fails the test if the + * Node cannot be found. This is a helper method to allow the nodeStatus to be + * checked in lambda expressions. + * @param dn Datanode for which to retrieve the NodeStatus. + */ + public static NodeStatus getNodeStatus(NodeManager nodeManager, + DatanodeDetails dn) { + return Assertions.assertDoesNotThrow( + () -> nodeManager.getNodeStatus(dn), + "Unexpected exception getting the nodeState"); + } + + /** + * Given a Datanode, return a string consisting of the hostname and one of its + * ports in the form host:port. + * @param dn Datanode for which to retrieve the host:port string + * @return host:port for the given DN. + */ + public static String getDNHostAndPort(DatanodeDetails dn) { + return dn.getHostName() + ":" + dn.getPorts().get(0).getValue(); + } + + /** + * Wait for the given datanode to reach the given persisted state. + * @param dn Datanode for which to check the state + * @param state The state to wait for.
+ * @throws TimeoutException + * @throws InterruptedException + */ + public static void waitForDnToReachPersistedOpState(DatanodeDetails dn, + HddsProtos.NodeOperationalState state) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor( + () -> dn.getPersistedOpState().equals(state), + 200, 30000); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java similarity index 85% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java index 5f36cf53652..e8dc7455a11 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java @@ -14,10 +14,9 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -37,33 +36,22 @@ import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos. - NodeOperationalState.IN_SERVICE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos. - NodeOperationalState.DECOMMISSIONING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos. 
- NodeOperationalState.IN_MAINTENANCE; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java index 1e03823692f..aa37f6d93a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java @@ -21,4 +21,4 @@ /** * Unit tests for Node related functions in SCM. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 725b17ee9d6..439b563d633 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -34,7 +35,6 @@ import java.util.concurrent.TimeUnit; import java.util.HashMap; import java.util.Map; -import java.util.Random; import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; @@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception { // each datanode has leaderNumOfEachDn leaders after balance checkLeaderBalance(dnNum, leaderNumOfEachDn); - Random r = new Random(0); for (int i = 0; i < 10; i++) { // destroy some pipelines, wait new pipelines created, // then check leader balance @@ -181,7 +180,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception { .getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); - int destroyNum = r.nextInt(pipelines.size()); + int destroyNum = RandomUtils.nextInt(0, pipelines.size()); for (int k = 0; k <= destroyNum; k++) { pipelineManager.closePipeline(pipelines.get(k), 
false); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java index 698c6b99bd0..cff6f03c1e7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -150,10 +149,10 @@ public void testMultiRaft() throws Exception { private void assertNotSamePeers() { nodeManager.getAllNodes().forEach((dn) -> { Collection peers = nodeManager.getPeerList(dn); - assertFalse(peers.contains(dn)); + assertThat(peers).doesNotContain(dn); List trimList = nodeManager.getAllNodes(); trimList.remove(dn); - assertTrue(peers.containsAll(trimList)); + assertThat(peers).containsAll(trimList); }); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java index 2c57ac0a6f4..841d0ef1684 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -104,7 +103,7 @@ public void testPipelineMap() throws IOException, // get pipeline details by dnid Set pipelines = scm.getScmNodeManager() .getPipelines(dns.get(0)); - assertTrue(pipelines.contains(ratisContainer.getPipeline().getId())); + assertThat(pipelines).contains(ratisContainer.getPipeline().getId()); // Now close the container and it should not show up while fetching // containers by pipeline @@ -120,6 +119,6 @@ public void testPipelineMap() throws IOException, pipelineManager.deletePipeline(ratisContainer.getPipeline().getId()); pipelines = scm.getScmNodeManager() .getPipelines(dns.get(0)); - assertFalse(pipelines.contains(ratisContainer.getPipeline().getId())); + assertThat(pipelines).doesNotContain(ratisContainer.getPipeline().getId()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index d46356fb8d4..858a4486757 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -58,11 +58,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; @@ -149,8 +148,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException, pipelineManager.deletePipeline(ratisContainer.getPipeline().getId()); for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) { // Assert that the pipeline has been removed from Node2PipelineMap as well - assertFalse(scm.getScmNodeManager().getPipelines(dn) - .contains(ratisContainer.getPipeline().getId())); + assertThat(scm.getScmNodeManager().getPipelines(dn)) + .doesNotContain(ratisContainer.getPipeline().getId()); } } @@ -212,7 +211,7 @@ public void testPipelineCloseWithPipelineAction() throws Exception { } @Test - public void testPipelineCloseWithLogFailure() + void testPipelineCloseWithLogFailure() throws IOException, TimeoutException { EventQueue eventQ = (EventQueue) scm.getEventQueue(); PipelineActionHandler pipelineActionTest = @@ -230,11 +229,7 @@ public void testPipelineCloseWithLogFailure() Pipeline openPipeline = containerWithPipeline.getPipeline(); RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId()); - try { - pipelineManager.getPipeline(openPipeline.getId()); - } catch (PipelineNotFoundException e) { - fail("pipeline should exist"); - } + pipelineManager.getPipeline(openPipeline.getId()); DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0); int index = cluster.getHddsDatanodeIndex(datanodeDetails); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java index f1a533bdfdc..b6e85ab942d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.pipeline; +package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java index 29c9392a1d5..25a29410b64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java @@ -16,15 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.pipeline; +package org.apache.hadoop.hdds.scm.pipeline; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java similarity index 94% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java index 85a61e8cb4b..cf41fc60933 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.pipeline; +package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -24,9 +24,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -77,7 +74,7 @@ public void testPipelineCreation() { long numPipelineCreated = getLongCounter("NumPipelineCreated", metrics); // Pipelines are created in background when the cluster starts. 
- assertTrue(numPipelineCreated > 0); + assertThat(numPipelineCreated).isGreaterThan(0); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java index d09e924ca81..40a80103934 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java @@ -86,7 +86,7 @@ public void setup(int numDatanodes) throws Exception { @Test - public void testScmSafeMode() throws Exception { + void testScmSafeMode() throws Exception { int datanodeCount = 6; setup(datanodeCount); waitForRatis3NodePipelines(datanodeCount / 3); @@ -136,11 +136,7 @@ public void testScmSafeMode() throws Exception { DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode(); // Now restart one datanode from the 2nd pipeline - try { - cluster.restartHddsDatanode(restartedDatanode, false); - } catch (Exception ex) { - fail("Datanode restart failed"); - } + cluster.restartHddsDatanode(restartedDatanode, false); GenericTestUtils.waitFor(() -> scmSafeModeManager.getOneReplicaPipelineSafeModeRule() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 20fa713deb4..563e0162acc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -25,10 +25,10 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -56,10 +57,11 @@ import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.apache.ratis.protocol.exceptions.AlreadyClosedException; import 
org.apache.ratis.protocol.exceptions.NotReplicatedException; @@ -126,21 +128,23 @@ public void init() throws Exception { ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(ratisClientConfig); - OzoneClientConfig clientConfig = new OzoneClientConfig(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); clientConfig.setChecksumType(ChecksumType.NONE); conf.setFromObject(clientConfig); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .applyTo(conf); + conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .setTotalPipelineNumLimit(3) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -178,8 +182,7 @@ public void testReleaseBuffers() throws Exception { long containerId = container.getContainerInfo().getContainerID(); try (XceiverClientSpi xceiverClient = mgr.acquireClient(pipeline)) { assertEquals(1, xceiverClient.getRefcount()); - assertTrue(xceiverClient instanceof XceiverClientRatis); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; + XceiverClientRatis ratisClient = assertInstanceOf(XceiverClientRatis.class, xceiverClient); CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient); BlockID blockID = ContainerTestHelper.getTestBlockID(containerId); List replies = new ArrayList<>(); @@ -225,17 +228,15 @@ public void testReleaseBuffers() throws Exception { assertEquals(2, watcher. 
getCommitIndexMap().size()); watcher.watchOnFirstIndex(); - assertFalse(watcher.getCommitIndexMap() - .containsKey(replies.get(0).getLogIndex())); - assertFalse(watcher.getFutureMap().containsKey((long) chunkSize)); - assertTrue(watcher.getTotalAckDataLength() >= chunkSize); + assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex()); + assertThat(watcher.getFutureMap()).doesNotContainKey((long) chunkSize); + assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize); watcher.watchOnLastIndex(); - assertFalse(watcher.getCommitIndexMap() - .containsKey(replies.get(1).getLogIndex())); - assertFalse(watcher.getFutureMap().containsKey((long) 2 * chunkSize)); + assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(1).getLogIndex()); + assertThat(watcher.getFutureMap()).doesNotContainKey((long) 2 * chunkSize); assertEquals(2 * chunkSize, watcher.getTotalAckDataLength()); - assertTrue(watcher.getFutureMap().isEmpty()); - assertTrue(watcher.getCommitIndexMap().isEmpty()); + assertThat(watcher.getFutureMap()).isEmpty(); + assertThat(watcher.getCommitIndexMap()).isEmpty(); } } } @@ -252,8 +253,7 @@ public void testReleaseBuffersOnException() throws Exception { long containerId = container.getContainerInfo().getContainerID(); try (XceiverClientSpi xceiverClient = mgr.acquireClient(pipeline)) { assertEquals(1, xceiverClient.getRefcount()); - assertTrue(xceiverClient instanceof XceiverClientRatis); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; + XceiverClientRatis ratisClient = assertInstanceOf(XceiverClientRatis.class, xceiverClient); CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient); BlockID blockID = ContainerTestHelper.getTestBlockID(containerId); List replies = new ArrayList<>(); @@ -298,32 +298,28 @@ public void testReleaseBuffersOnException() throws Exception { assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize)); assertEquals(2, watcher.getCommitIndexMap().size()); watcher.watchOnFirstIndex(); - assertFalse(watcher.getCommitIndexMap() - .containsKey(replies.get(0).getLogIndex())); - assertFalse(watcher.getFutureMap().containsKey((long) chunkSize)); - assertTrue(watcher.getTotalAckDataLength() >= chunkSize); + assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex()); + assertThat(watcher.getFutureMap()).doesNotContainKey((long) chunkSize); + assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize); cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - try { - // just watch for a higher index so as to ensure, it does an actual - // call to Ratis. Otherwise, it may just return in case the - // commitInfoMap is updated to the latest index in putBlock response. 
- watcher.watchForCommit(replies.get(1).getLogIndex() + 100); - fail("Expected exception not thrown"); - } catch (IOException ioe) { - // with retry count set to noRetry and a lower watch request - // timeout, watch request will eventually - // fail with TimeoutIOException from ratis client or the client - // can itself get AlreadyClosedException from the Ratis Server - // and the write may fail with RaftRetryFailureException - Throwable t = HddsClientUtils.checkForException(ioe); - assertTrue( - t instanceof RaftRetryFailureException || - t instanceof TimeoutIOException || - t instanceof AlreadyClosedException || - t instanceof NotReplicatedException, - "Unexpected exception: " + t.getClass()); - } + // just watch for a higher index so as to ensure, it does an actual + // call to Ratis. Otherwise, it may just return in case the + // commitInfoMap is updated to the latest index in putBlock response. + IOException ioe = + assertThrows(IOException.class, () -> watcher.watchForCommit(replies.get(1).getLogIndex() + 100)); + Throwable t = HddsClientUtils.checkForException(ioe); + // with retry count set to noRetry and a lower watch request + // timeout, watch request will eventually + // fail with TimeoutIOException from ratis client or the client + // can itself get AlreadyClosedException from the Ratis Server + // and the write may fail with RaftRetryFailureException + assertTrue( + t instanceof RaftRetryFailureException || + t instanceof TimeoutIOException || + t instanceof AlreadyClosedException || + t instanceof NotReplicatedException, + "Unexpected exception: " + t.getClass()); if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1) .getLogIndex()) { assertEquals(chunkSize, watcher.getTotalAckDataLength()); @@ -331,8 +327,8 @@ public void testReleaseBuffersOnException() throws Exception { assertEquals(1, watcher.getFutureMap().size()); } else { assertEquals(2 * chunkSize, watcher.getTotalAckDataLength()); - assertTrue(watcher.getFutureMap().isEmpty()); - assertTrue(watcher.getCommitIndexMap().isEmpty()); + assertThat(watcher.getFutureMap()).isEmpty(); + assertThat(watcher.getCommitIndexMap()).isEmpty(); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 8ab74422516..3c980f94c59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -112,10 +112,11 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * This class tests container commands on EC containers. 
@@ -129,8 +130,6 @@ public class TestContainerCommandsEC { private static ObjectStore store; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static final String SCM_ID = UUID.randomUUID().toString(); - private static final String CLUSTER_ID = UUID.randomUUID().toString(); private static final int EC_DATA = 3; private static final int EC_PARITY = 2; private static final EcCodec EC_CODEC = EcCodec.RS; @@ -423,17 +422,17 @@ public void testListBlock() throws Exception { ListBlockResponseProto response = ContainerProtocolCalls .listBlock(clients.get(i), containerID, null, Integer.MAX_VALUE, containerToken); - assertTrue( - minNumExpectedBlocks <= response.getBlockDataList().stream().filter( + assertThat(minNumExpectedBlocks) + .withFailMessage("blocks count should be same or more than min expected" + + " blocks count on DN " + i) + .isLessThanOrEqualTo(response.getBlockDataList().stream().filter( k -> k.getChunksCount() > 0 && k.getChunks(0).getLen() > 0) - .collect(Collectors.toList()).size(), - "blocks count should be same or more than min expected" + - " blocks count on DN " + i); - assertTrue( - minNumExpectedChunks <= response.getBlockDataList().stream() - .mapToInt(BlockData::getChunksCount).sum(), - "chunks count should be same or more than min expected" + - " chunks count on DN " + i); + .collect(Collectors.toList()).size()); + assertThat(minNumExpectedChunks) + .withFailMessage("chunks count should be same or more than min expected" + + " chunks count on DN " + i) + .isLessThanOrEqualTo(response.getBlockDataList().stream() + .mapToInt(BlockData::getChunksCount).sum()); } } @@ -797,7 +796,7 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket, try (OzoneOutputStream out = bucket.createKey(keyString, 4096, new ECReplicationConfig(3, 2, EcCodec.RS, EC_CHUNK_SIZE), new HashMap<>())) { - assertTrue(out.getOutputStream() instanceof KeyOutputStream); + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); for (int i = 0; i < numChunks; i++) { out.write(inputChunks[i]); } @@ -925,7 +924,6 @@ public static void startCluster(OzoneConfiguration conf) throws Exception { secretKeyClient = new SecretKeyTestClient(); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(NUM_DN) - .setScmId(SCM_ID).setClusterId(CLUSTER_ID) .setCertificateClient(certClient) .setSecretKeyClient(secretKeyClient) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index caf9cadb165..57e807b7c75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -115,7 +115,7 @@ public class TestHDDSUpgrade { private StorageContainerManager scm; private ContainerManager scmContainerManager; private PipelineManager scmPipelineManager; - private final int numContainersCreated = 1; + private static final int NUM_CONTAINERS_CREATED = 1; private HDDSLayoutVersionManager scmVersionManager; private AtomicBoolean testPassed = new AtomicBoolean(true); private static @@ -316,7 +316,7 @@ public void testFinalizationFromInitialVersionToLatestVersion() // Verify Post-Upgrade conditions on the SCM. 
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM( cluster.getStorageContainerManagersList(), - numContainersCreated, NUM_DATA_NODES); + NUM_CONTAINERS_CREATED, NUM_DATA_NODES); // All datanodes on the SCM should have moved to HEALTHY-READONLY state. TestHddsUpgradeUtils.testDataNodesStateOnSCM( @@ -327,7 +327,7 @@ public void testFinalizationFromInitialVersionToLatestVersion() // In the happy path case, no containers should have been quasi closed as // a result of the upgrade. TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes( - cluster.getHddsDatanodes(), numContainersCreated, CLOSED); + cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED, CLOSED); // Test that we can use a pipeline after upgrade. // Will fail with exception if there are no pipelines. @@ -871,7 +871,7 @@ public void testFinalizationWithFailureInjectionHelper( // Verify Post-Upgrade conditions on the SCM. // With failure injection TestHddsUpgradeUtils.testPostUpgradeConditionsSCM( - cluster.getStorageContainerManagersList(), numContainersCreated, + cluster.getStorageContainerManagersList(), NUM_CONTAINERS_CREATED, NUM_DATA_NODES); // All datanodes on the SCM should have moved to HEALTHY-READONLY state. @@ -898,7 +898,7 @@ public void testFinalizationWithFailureInjectionHelper( // Verify the SCM has driven all the DataNodes through Layout Upgrade. TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes( - cluster.getHddsDatanodes(), numContainersCreated); + cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED); // Verify that new pipeline can be created with upgraded datanodes. try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java index 6fc964fd0ab..cd0fd9d4bbe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.ALREADY_FINALIZED; import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -117,7 +118,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm, HDDSLayoutVersionManager scmVersionManager = scm.getLayoutVersionManager(); assertEquals(scmVersionManager.getSoftwareLayoutVersion(), scmVersionManager.getMetadataLayoutVersion()); - assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1); + assertThat(scmVersionManager.getMetadataLayoutVersion()).isGreaterThanOrEqualTo(1); // SCM should not return from finalization until there is at least one // pipeline to use. 
@@ -147,7 +148,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm, (ciState == HddsProtos.LifeCycleState.QUASI_CLOSED)); countContainers++; } - assertTrue(countContainers >= numContainers); + assertThat(countContainers).isGreaterThanOrEqualTo(numContainers); } /* @@ -173,7 +174,7 @@ public static void testPreUpgradeConditionsDataNodes( countContainers++; } } - assertTrue(countContainers >= 1); + assertThat(countContainers).isGreaterThanOrEqualTo(1); } /* @@ -217,7 +218,7 @@ public static void testPostUpgradeConditionsDataNodes( dsm.getLayoutVersionManager(); assertEquals(dnVersionManager.getSoftwareLayoutVersion(), dnVersionManager.getMetadataLayoutVersion()); - assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1); + assertThat(dnVersionManager.getMetadataLayoutVersion()).isGreaterThanOrEqualTo(1); // Also verify that all the existing containers are closed. for (Container container : @@ -228,7 +229,7 @@ public static void testPostUpgradeConditionsDataNodes( countContainers++; } } - assertTrue(countContainers >= numContainers); + assertThat(countContainers).isGreaterThanOrEqualTo(numContainers); } public static void testDataNodesStateOnSCM(List scms, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index f9f6871f546..d2ae30efceb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -37,7 +37,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -56,6 +55,12 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests upgrade finalization failure scenarios and corner cases specific to SCM @@ -167,7 +172,7 @@ public void testFinalizationWithLeaderChange( // Make sure the original SCM leader is not the leader anymore. StorageContainerManager newLeaderScm = cluster.getActiveSCM(); - Assertions.assertNotEquals(newLeaderScm.getSCMNodeId(), + assertNotEquals(newLeaderScm.getSCMNodeId(), oldLeaderScm.getSCMNodeId()); // Resume finalization from the new leader. @@ -288,8 +293,8 @@ public void testSnapshotFinalization() throws Exception { inactiveScm, 0, NUM_DATANODES); // Use log to verify a snapshot was installed. 
- Assertions.assertTrue(logCapture.getOutput().contains("New SCM snapshot " + - "received with metadata layout version")); + assertThat(logCapture.getOutput()).contains("New SCM snapshot " + + "received with metadata layout version"); } private void waitForScmsToFinalize(Collection scms) @@ -319,35 +324,31 @@ private void checkMidFinalizationConditions( for (StorageContainerManager scm: scms) { switch (haltingPoint) { case BEFORE_PRE_FINALIZE_UPGRADE: - Assertions.assertFalse( - scm.getPipelineManager().isPipelineCreationFrozen()); - Assertions.assertEquals( + assertFalse(scm.getPipelineManager().isPipelineCreationFrozen()); + assertEquals( scm.getScmContext().getFinalizationCheckpoint(), FinalizationCheckpoint.FINALIZATION_REQUIRED); break; case AFTER_PRE_FINALIZE_UPGRADE: - Assertions.assertTrue( - scm.getPipelineManager().isPipelineCreationFrozen()); - Assertions.assertEquals( + assertTrue(scm.getPipelineManager().isPipelineCreationFrozen()); + assertEquals( scm.getScmContext().getFinalizationCheckpoint(), FinalizationCheckpoint.FINALIZATION_STARTED); break; case AFTER_COMPLETE_FINALIZATION: - Assertions.assertFalse( - scm.getPipelineManager().isPipelineCreationFrozen()); - Assertions.assertEquals( + assertFalse(scm.getPipelineManager().isPipelineCreationFrozen()); + assertEquals( scm.getScmContext().getFinalizationCheckpoint(), FinalizationCheckpoint.MLV_EQUALS_SLV); break; case AFTER_POST_FINALIZE_UPGRADE: - Assertions.assertFalse( - scm.getPipelineManager().isPipelineCreationFrozen()); - Assertions.assertEquals( + assertFalse(scm.getPipelineManager().isPipelineCreationFrozen()); + assertEquals( scm.getScmContext().getFinalizationCheckpoint(), FinalizationCheckpoint.FINALIZATION_COMPLETE); break; default: - Assertions.fail("Unknown halting point in test: " + haltingPoint); + fail("Unknown halting point in test: " + haltingPoint); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java index 87fbe23ac76..4197ac8a816 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java @@ -27,10 +27,10 @@ import org.junit.jupiter.api.Test; import java.io.IOException; -import java.util.UUID; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -49,12 +49,9 @@ public class TestRocksObjectLeakDetector { static void setUp() throws IOException, InterruptedException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); + conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); String omServiceId = "omServiceId1"; cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java new file mode 100644 index 00000000000..d436a65dab2 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; + +/** + * Helper for tests that want to set client stream properties. + */ +public final class ClientConfigForTesting { + + private int chunkSize = 1024 * 1024; + private Long blockSize; + private Integer streamBufferSize; + private Long streamBufferFlushSize; + private Long dataStreamBufferFlushSize; + private Long dataStreamWindowSize; + private Long streamBufferMaxSize; + private Integer dataStreamMinPacketSize; + private final StorageUnit unit; + + /** + * @param unit Defines the unit in which size properties will be passed to the builder. + * All sizes are stored internally converted to {@link StorageUnit#BYTES}. + */ + public static ClientConfigForTesting newBuilder(StorageUnit unit) { + return new ClientConfigForTesting(unit); + } + + private ClientConfigForTesting(StorageUnit unit) { + this.unit = unit; + } + + public ClientConfigForTesting setChunkSize(int size) { + chunkSize = (int) toBytes(size); + return this; + } + + public ClientConfigForTesting setBlockSize(long size) { + blockSize = toBytes(size); + return this; + } + + @SuppressWarnings("unused") // kept for completeness + public ClientConfigForTesting setStreamBufferSize(int size) { + streamBufferSize = (int) toBytes(size); + return this; + } + + public ClientConfigForTesting setStreamBufferFlushSize(long size) { + streamBufferFlushSize = toBytes(size); + return this; + } + + public ClientConfigForTesting setStreamBufferMaxSize(long size) { + streamBufferMaxSize = toBytes(size); + return this; + } + + public ClientConfigForTesting setDataStreamMinPacketSize(int size) { + dataStreamMinPacketSize = (int) toBytes(size); + return this; + } + + public ClientConfigForTesting setDataStreamBufferFlushSize(long size) { + dataStreamBufferFlushSize = toBytes(size); + return this; + } + + public ClientConfigForTesting setDataStreamWindowSize(long size) { + dataStreamWindowSize = toBytes(size); + return this; + } + + public void applyTo(MutableConfigurationSource conf) { + if (streamBufferSize == null) { + streamBufferSize = chunkSize; + } + if (streamBufferFlushSize == null) { + streamBufferFlushSize = (long) chunkSize; + } + if (streamBufferMaxSize == null) { + streamBufferMaxSize = 2 * streamBufferFlushSize; + } + if (dataStreamBufferFlushSize == null) { + dataStreamBufferFlushSize = 4L * chunkSize; + } + if (dataStreamMinPacketSize == null) { + dataStreamMinPacketSize = chunkSize / 4; + } + if (dataStreamWindowSize == null) { + dataStreamWindowSize = 8L * chunkSize; + } + if (blockSize == null) { + blockSize = 2 * streamBufferMaxSize; + } + + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setStreamBufferSize(streamBufferSize); + clientConfig.setStreamBufferMaxSize(streamBufferMaxSize); + clientConfig.setStreamBufferFlushSize(streamBufferFlushSize); + 
clientConfig.setDataStreamBufferFlushSize(dataStreamBufferFlushSize); + clientConfig.setDataStreamMinPacketSize(dataStreamMinPacketSize); + clientConfig.setStreamWindowSize(dataStreamWindowSize); + + conf.setFromObject(clientConfig); + conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, chunkSize, StorageUnit.BYTES); + conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, blockSize, StorageUnit.BYTES); + } + + private long toBytes(long value) { + return Math.round(unit.toBytes(value)); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 458a0ca891f..e864cae00b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -20,11 +20,9 @@ import java.io.IOException; import java.util.List; import java.util.Optional; -import java.util.OptionalInt; import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -319,23 +317,12 @@ abstract class Builder { protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; protected SCMConfigurator scmConfigurator; - protected Optional enableTrace = Optional.of(false); protected Optional hbInterval = Optional.empty(); protected Optional hbProcessorInterval = Optional.empty(); - protected Optional scmId = Optional.empty(); - protected Optional omId = Optional.empty(); + protected String scmId = UUID.randomUUID().toString(); + protected String omId = UUID.randomUUID().toString(); - protected Boolean enableContainerDatastream = true; protected Optional datanodeReservedSpace = Optional.empty(); - protected Optional chunkSize = Optional.empty(); - protected OptionalInt streamBufferSize = OptionalInt.empty(); - protected Optional streamBufferFlushSize = Optional.empty(); - protected Optional dataStreamBufferFlushSize = Optional.empty(); - protected Optional datastreamWindowSize = Optional.empty(); - protected Optional streamBufferMaxSize = Optional.empty(); - protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty(); - protected Optional blockSize = Optional.empty(); - protected Optional streamBufferSizeUnit = Optional.empty(); protected boolean includeRecon = false; @@ -343,9 +330,6 @@ abstract class Builder { protected Optional scmLayoutVersion = Optional.empty(); protected Optional dnLayoutVersion = Optional.empty(); - // Use relative smaller number of handlers for testing - protected int numOfOmHandlers = 20; - protected int numOfScmHandlers = 20; protected int numOfDatanodes = 3; protected int numDataVolumes = 1; protected boolean startDataNodes = true; @@ -375,14 +359,11 @@ public Builder setSCMConfigurator(SCMConfigurator configurator) { * Sets the cluster Id. * * @param id cluster Id - * - * @return MiniOzoneCluster.Builder */ - public Builder setClusterId(String id) { + void setClusterId(String id) { clusterId = id; path = GenericTestUtils.getTempPath( MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId); - return this; } /** @@ -418,30 +399,6 @@ public Builder setSecretKeyClient(SecretKeyClient client) { return this; } - /** - * Sets the SCM id. 
- * - * @param id SCM Id - * - * @return MiniOzoneCluster.Builder - */ - public Builder setScmId(String id) { - scmId = Optional.of(id); - return this; - } - - /** - * Sets the OM id. - * - * @param id OM Id - * - * @return MiniOzoneCluster.Builder - */ - public Builder setOmId(String id) { - omId = Optional.of(id); - return this; - } - /** * Sets the number of HddsDatanodes to be started as part of * MiniOzoneCluster. @@ -503,18 +460,6 @@ public Builder setHbProcessorInterval(int val) { return this; } - /** - * When set to true, enables trace level logging. - * - * @param trace true or false - * - * @return MiniOzoneCluster.Builder - */ - public Builder setTrace(Boolean trace) { - enableTrace = Optional.of(trace); - return this; - } - /** * Sets the reserved space * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys} @@ -533,66 +478,6 @@ public Builder setDatanodeReservedSpace(String reservedSpace) { return this; } - /** - * Sets the chunk size. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setChunkSize(int size) { - chunkSize = Optional.of(size); - return this; - } - - public Builder setStreamBufferSize(int size) { - streamBufferSize = OptionalInt.of(size); - return this; - } - - /** - * Sets the flush size for stream buffer. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setStreamBufferFlushSize(long size) { - streamBufferFlushSize = Optional.of(size); - return this; - } - - /** - * Sets the max size for stream buffer. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setStreamBufferMaxSize(long size) { - streamBufferMaxSize = Optional.of(size); - return this; - } - - public Builder setDataStreamBufferFlushize(long size) { - dataStreamBufferFlushSize = Optional.of(size); - return this; - } - - public Builder setDataStreamMinPacketSize(int size) { - dataStreamMinPacketSize = OptionalInt.of(size); - return this; - } - - public Builder setDataStreamStreamWindowSize(long size) { - datastreamWindowSize = Optional.of(size); - return this; - } - - /** - * Sets the block size for stream buffer. 
- * - * @return MiniOzoneCluster.Builder - */ - public Builder setBlockSize(long size) { - blockSize = Optional.of(size); - return this; - } - public Builder setNumOfOzoneManagers(int numOMs) { this.numOfOMs = numOMs; return this; @@ -603,11 +488,6 @@ public Builder setNumOfActiveOMs(int numActiveOMs) { return this; } - public Builder setStreamBufferSizeUnit(StorageUnit unit) { - this.streamBufferSizeUnit = Optional.of(unit); - return this; - } - public Builder setOMServiceId(String serviceId) { this.omServiceId = serviceId; return this; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 24a3ff84f1c..400ae3ee2cc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -26,8 +26,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Optional; -import java.util.OptionalInt; import java.util.Set; import java.util.UUID; import java.util.Iterator; @@ -36,7 +34,6 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -45,7 +42,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; @@ -108,7 +104,6 @@ import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.slf4j.event.Level; /** * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for @@ -656,58 +651,7 @@ protected void initializeConfiguration() throws IOException { Files.createDirectories(metaDir); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - if (!chunkSize.isPresent()) { - //set it to 1MB by default in tests - chunkSize = Optional.of(1); - } - if (!streamBufferSize.isPresent()) { - streamBufferSize = OptionalInt.of(chunkSize.get()); - } - if (!streamBufferFlushSize.isPresent()) { - streamBufferFlushSize = Optional.of((long) chunkSize.get()); - } - if (!streamBufferMaxSize.isPresent()) { - streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get()); - } - if (!dataStreamBufferFlushSize.isPresent()) { - dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get()); - } - if (!dataStreamMinPacketSize.isPresent()) { - dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4); - } - if (!datastreamWindowSize.isPresent()) { - datastreamWindowSize = Optional.of((long) 8 * chunkSize.get()); - } - if (!blockSize.isPresent()) { - blockSize = Optional.of(2 * streamBufferMaxSize.get()); - } - if (!streamBufferSizeUnit.isPresent()) { - streamBufferSizeUnit = Optional.of(StorageUnit.MB); - } - - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setStreamBufferSize( - (int) Math.round( - 
streamBufferSizeUnit.get().toBytes(streamBufferSize.getAsInt()))); - clientConfig.setStreamBufferMaxSize(Math.round( - streamBufferSizeUnit.get().toBytes(streamBufferMaxSize.get()))); - clientConfig.setStreamBufferFlushSize(Math.round( - streamBufferSizeUnit.get().toBytes(streamBufferFlushSize.get()))); - clientConfig.setDataStreamBufferFlushSize(Math.round( - streamBufferSizeUnit.get().toBytes(dataStreamBufferFlushSize.get()))); - clientConfig.setDataStreamMinPacketSize((int) Math.round( - streamBufferSizeUnit.get() - .toBytes(dataStreamMinPacketSize.getAsInt()))); - clientConfig.setStreamWindowSize(Math.round( - streamBufferSizeUnit.get().toBytes(datastreamWindowSize.get()))); - conf.setFromObject(clientConfig); - - conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - chunkSize.get(), streamBufferSizeUnit.get()); - - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(), - streamBufferSizeUnit.get()); // MiniOzoneCluster should have global pipeline upper limit. conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ? @@ -722,7 +666,6 @@ protected void initializeConfiguration() throws IOException { // pipeline. conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDatanodes >= 3 ? 3 : 1); - configureTrace(); } void removeConfiguration() { @@ -764,10 +707,7 @@ protected void initializeScmStorage(SCMStorageConfig scmStore) return; } scmStore.setClusterId(clusterId); - if (!scmId.isPresent()) { - scmId = Optional.of(UUID.randomUUID().toString()); - } - scmStore.setScmId(scmId.get()); + scmStore.setScmId(scmId); scmStore.initialize(); //TODO: HDDS-6897 //Disabling Ratis for only of MiniOzoneClusterImpl. @@ -777,7 +717,7 @@ protected void initializeScmStorage(SCMStorageConfig scmStore) && SCMHAUtils.isSCMHAEnabled(conf)) { scmStore.setSCMHAFlag(true); scmStore.persistCurrentState(); - SCMRatisServerImpl.initialize(clusterId, scmId.get(), + SCMRatisServerImpl.initialize(clusterId, scmId, SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) .getLocalNodeDetails(), conf); } @@ -788,10 +728,10 @@ void initializeOmStorage(OMStorage omStorage) throws IOException { return; } omStorage.setClusterId(clusterId); - omStorage.setOmId(omId.orElse(UUID.randomUUID().toString())); + omStorage.setOmId(omId); // Initialize ozone certificate client if security is enabled. 
if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - OzoneManager.initializeSecurity(conf, omStorage, scmId.get()); + OzoneManager.initializeSecurity(conf, omStorage, scmId); } omStorage.initialize(); } @@ -838,7 +778,6 @@ protected String getSCMAddresses(List scms) { protected List createHddsDatanodes( List scms, ReconServer reconServer) throws IOException { - configureHddsDatanodes(); String scmAddress = getSCMAddresses(scms); String[] args = new String[] {}; conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); @@ -909,7 +848,6 @@ protected void configureSCM() { localhostWithFreePort()); conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, localhostWithFreePort()); - conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); configureSCMheartbeat(); @@ -944,12 +882,6 @@ private void configureOM() { conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, localhostWithFreePort()); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort()); - conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); - } - - private void configureHddsDatanodes() { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - enableContainerDatastream); } protected void configureDatanodePorts(ConfigurationTarget conf) { @@ -967,15 +899,6 @@ protected void configureDatanodePorts(ConfigurationTarget conf) { conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); } - private void configureTrace() { - if (enableTrace.isPresent()) { - conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, - enableTrace.get()); - GenericTestUtils.setRootLogLevel(Level.TRACE); - } - GenericTestUtils.setRootLogLevel(Level.INFO); - } - protected void configureRecon() throws IOException { ConfigurationProvider.resetConfiguration(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index d79f28ba158..797a7515f20 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -170,18 +170,14 @@ public StorageContainerManager getScmLeader() { .findFirst().orElse(null); } - private OzoneManager getOMLeader(boolean waitForLeaderElection) + public OzoneManager waitForLeaderOM() throws TimeoutException, InterruptedException { - if (waitForLeaderElection) { - final OzoneManager[] om = new OzoneManager[1]; - GenericTestUtils.waitFor(() -> { - om[0] = getOMLeader(); - return om[0] != null; - }, 200, waitForClusterToBeReadyTimeout); - return om[0]; - } else { - return getOMLeader(); - } + final OzoneManager[] om = new OzoneManager[1]; + GenericTestUtils.waitFor(() -> { + om[0] = getOMLeader(); + return om[0] != null; + }, 200, waitForClusterToBeReadyTimeout); + return om[0]; } /** @@ -431,7 +427,6 @@ public MiniOzoneCluster build() throws IOException { protected void initOMRatisConf() { conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); // If test change the following config values we will respect, // otherwise we will set lower timeout values. 
@@ -731,7 +726,7 @@ public void bootstrapOzoneManager(String omNodeId, int retryCount = 0; OzoneManager om = null; - OzoneManager omLeader = getOMLeader(true); + OzoneManager omLeader = waitForLeaderOM(); long leaderSnapshotIndex = omLeader.getRatisSnapshotIndex(); while (true) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index c4b027074ff..884e435d25e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -35,8 +35,10 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils.VoidCallable; import org.apache.ratis.util.function.CheckedConsumer; -import org.junit.jupiter.api.Assertions; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Helper class for Tests. @@ -92,7 +94,7 @@ public static void closeContainers( .updateContainerState(ContainerID.valueOf(blockID.getContainerID()), HddsProtos.LifeCycleEvent.CLOSE); } - Assertions.assertFalse(scm.getContainerManager() + assertFalse(scm.getContainerManager() .getContainer(ContainerID.valueOf(blockID.getContainerID())) .isOpen()); }, omKeyLocationInfoGroups); @@ -140,14 +142,10 @@ public static void performOperationOnKeyContainers( public static void expectOmException( OMException.ResultCodes code, - VoidCallable eval) - throws Exception { - try { - eval.call(); - Assertions.fail("OMException is expected"); - } catch (OMException ex) { - Assertions.assertEquals(code, ex.getResult()); - } + VoidCallable eval) { + + OMException ex = assertThrows(OMException.class, () -> eval.call(), "OMException is expected"); + assertEquals(code, ex.getResult()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 3f94387e1c6..5338cb8a0cc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -18,87 +18,31 @@ package org.apache.hadoop.ozone; -import java.io.Closeable; import java.io.IOException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rpc.RpcClient; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; import org.apache.ratis.statemachine.StateMachine; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; + /** * Helpers for Ratis tests. */ public interface RatisTestHelper { Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); - /** For testing Ozone with Ratis. */ - class RatisTestSuite implements Closeable { - static final RpcType RPC = SupportedRpcType.GRPC; - static final int NUM_DATANODES = 3; - - private final OzoneConfiguration conf; - private final MiniOzoneCluster cluster; - - /** - * Create a {@link MiniOzoneCluster} for testing by setting. - * OZONE_ENABLED = true - * RATIS_ENABLED = true - */ - public RatisTestSuite() - throws IOException, TimeoutException, InterruptedException { - conf = newOzoneConfiguration(RPC); - - cluster = newMiniOzoneCluster(NUM_DATANODES, conf); - } - - public OzoneConfiguration getConf() { - return conf; - } - - public MiniOzoneCluster getCluster() { - return cluster; - } - - public ClientProtocol newOzoneClient() - throws IOException { - return new RpcClient(conf, null); - } - - @Override - public void close() { - cluster.shutdown(); - } - - public int getDatanodeOzoneRestPort() { - return cluster.getHddsDatanodes().get(0).getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.REST).getValue(); - } - } - - static OzoneConfiguration newOzoneConfiguration(RpcType rpc) { - final OzoneConfiguration conf = new OzoneConfiguration(); - initRatisConf(rpc, conf); - return conf; - } - static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); @@ -108,17 +52,6 @@ static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { rpc.name()); } - static MiniOzoneCluster newMiniOzoneCluster( - int numDatanodes, OzoneConfiguration conf) - throws IOException, TimeoutException, InterruptedException { - final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .setNumDatanodes(numDatanodes).build(); - cluster.waitForClusterToBeReady(); - return cluster; - } - static void initXceiverServerRatis( RpcType rpc, DatanodeDetails dd, Pipeline pipeline) throws IOException { final RaftPeer p = RatisHelper.toRaftPeer(dd); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 9cf4f53a68f..a04c1236186 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -116,8 +116,6 @@ public final class TestBlockTokens { private static File testUserKeytab; private static String testUserPrincipal; private static String host; - private static String clusterId; - private static String scmId; private static MiniOzoneHAClusterImpl cluster; private static OzoneClient client; private static BlockInputStreamFactory blockInputStreamFactory = @@ -132,8 +130,6 @@ public static void init() throws Exception { workDir = GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); startMiniKdc(); setSecureConfig(); @@ -383,9 +379,7 @@ private static void startCluster() throws IOException, TimeoutException, InterruptedException { 
OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) .setSCMServiceId("TestSecretKey") - .setScmId(scmId) .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index 9a98a0a1897..a181a6f45e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -47,7 +47,6 @@ import java.time.Instant; import java.util.Map; import java.util.Properties; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -71,11 +70,13 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; + /** * Integration test class to verify block token CLI commands functionality in a * secure cluster. @@ -92,8 +93,6 @@ public final class TestBlockTokensCLI { private static File ozoneKeytab; private static File spnegoKeytab; private static String host; - private static String clusterId; - private static String scmId; private static String omServiceId; private static String scmServiceId; private static MiniOzoneHAClusterImpl cluster; @@ -108,8 +107,6 @@ public static void init() throws Exception { workDir = GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test"; scmServiceId = "scm-service-test"; @@ -264,7 +261,7 @@ public void testRotateKeySCMAdminCommandUtil(String[] args) { // rotating. String currentKey = getScmSecretKeyManager().getCurrentSecretKey().toString(); - Assertions.assertEquals(initialKey, currentKey); + assertEquals(initialKey, currentKey); // Rotate the secret key. ozoneAdmin.execute(args); @@ -280,9 +277,9 @@ public void testRotateKeySCMAdminCommandUtil(String[] args) { // Otherwise, both keys should be the same. 
if (isForceFlagPresent(args) || shouldRotate(getScmSecretKeyManager().getCurrentSecretKey())) { - Assertions.assertNotEquals(initialKey, newKey); + assertNotEquals(initialKey, newKey); } else { - Assertions.assertEquals(initialKey, newKey); + assertEquals(initialKey, newKey); } } @@ -322,10 +319,8 @@ private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setScmId(scmId) .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 56e744886b8..5f8f34a2e3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -40,8 +40,11 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; + +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -91,15 +94,10 @@ public void testCreate() throws Exception { */ @Test public void testGetPipeline() throws Exception { - try { - storageClient.getPipeline(PipelineID.randomId().getProtobuf()); - Assertions.fail("Get Pipeline should fail"); - } catch (Exception e) { - assertTrue( - SCMHAUtils.unwrapException(e) instanceof PipelineNotFoundException); - } - - Assertions.assertFalse(storageClient.listPipelines().isEmpty()); + Exception e = + assertThrows(Exception.class, () -> storageClient.getPipeline(PipelineID.randomId().getProtobuf())); + assertInstanceOf(PipelineNotFoundException.class, SCMHAUtils.unwrapException(e)); + assertThat(storageClient.listPipelines()).isNotEmpty(); } @Test @@ -154,8 +152,7 @@ public void testDatanodeUsageInfoContainerCount() throws IOException { dn.getIpAddress(), dn.getUuidString()); assertEquals(1, usageInfoList.size()); - assertTrue(usageInfoList.get(0).getContainerCount() >= 0 && - usageInfoList.get(0).getContainerCount() <= 1); + assertThat(usageInfoList.get(0).getContainerCount()).isGreaterThanOrEqualTo(0).isLessThanOrEqualTo(1); totalContainerCount[(int)usageInfoList.get(0).getContainerCount()]++; } assertEquals(2, totalContainerCount[0]); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java index 6e76a86dd94..c727ecd0a9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java @@ -41,8 +41,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; -import java.io.IOException; import 
java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; @@ -80,43 +78,39 @@ public static void shutdown() { } @Test - public void testContainerStateMachineIdempotency() throws Exception { + void testContainerStateMachineIdempotency() throws Exception { ContainerWithPipeline container = storageContainerLocationClient .allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); long containerID = container.getContainerInfo().getContainerID(); Pipeline pipeline = container.getPipeline(); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); - try { - //create the container - ContainerProtocolCalls.createContainer(client, containerID, null); - // call create Container again - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)) - .getBytes(UTF_8); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper - .getWriteChunkRequest(container.getPipeline(), blockID, - data.length); - client.sendCommand(writeChunkRequest); + //create the container + ContainerProtocolCalls.createContainer(client, containerID, null); + // call create Container again + BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); + byte[] data = + RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8); + ContainerProtos.ContainerCommandRequestProto writeChunkRequest = + ContainerTestHelper + .getWriteChunkRequest(container.getPipeline(), blockID, + data.length); + client.sendCommand(writeChunkRequest); - //Make the write chunk request again without requesting for overWrite - client.sendCommand(writeChunkRequest); - // Now, explicitly make a putKey request for the block. - ContainerProtos.ContainerCommandRequestProto putKeyRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - client.sendCommand(putKeyRequest).getPutBlock(); - // send the putBlock again - client.sendCommand(putKeyRequest); + //Make the write chunk request again without requesting for overWrite + client.sendCommand(writeChunkRequest); + // Now, explicitly make a putKey request for the block. 
+ ContainerProtos.ContainerCommandRequestProto putKeyRequest = + ContainerTestHelper + .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); + client.sendCommand(putKeyRequest).getPutBlock(); + // send the putBlock again + client.sendCommand(putKeyRequest); + + // close container call + ContainerProtocolCalls.closeContainer(client, containerID, null); + ContainerProtocolCalls.closeContainer(client, containerID, null); - // close container call - ContainerProtocolCalls.closeContainer(client, containerID, null); - ContainerProtocolCalls.closeContainer(client, containerID, null); - } catch (IOException ioe) { - Assertions.fail("Container operation failed" + ioe); - } xceiverClientManager.releaseClient(client, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java index c70d4da4baa..e49a378a15c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.util.concurrent.TimeoutException; @@ -67,15 +67,12 @@ public void testCpuMetrics() throws IOException { String metricsResponseBodyContent = metricsResponse.body().string(); // then - assertTrue(metricsResponseBodyContent - .contains("jvm_metrics_cpu_available_processors"), - metricsResponseBodyContent); - assertTrue(metricsResponseBodyContent - .contains("jvm_metrics_cpu_system_load"), - metricsResponseBodyContent); - assertTrue(metricsResponseBodyContent - .contains("jvm_metrics_cpu_jvm_load"), - metricsResponseBodyContent); + assertThat(metricsResponseBodyContent) + .contains("jvm_metrics_cpu_available_processors"); + assertThat(metricsResponseBodyContent) + .contains("jvm_metrics_cpu_system_load"); + assertThat(metricsResponseBodyContent) + .contains("jvm_metrics_cpu_jvm_load"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index da806ac2a3e..a82a1a8be70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -86,11 +86,11 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertFalse; + import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.slf4j.event.Level.INFO; import org.junit.jupiter.api.AfterEach; @@ -323,8 +323,8 @@ public void testDelegationToken(boolean useIp) throws Exception { RandomStringUtils.randomAscii(5)); 
// Assert if auth was successful via Kerberos - assertFalse(logs.getOutput().contains( - "Auth successful for " + username + " (auth:KERBEROS)")); + assertThat(logs.getOutput()).doesNotContain( + "Auth successful for " + username + " (auth:KERBEROS)"); // Case 1: Test successful delegation token. Token token = omClient @@ -332,7 +332,7 @@ public void testDelegationToken(boolean useIp) throws Exception { // Case 2: Test successful token renewal. long renewalTime = omClient.renewDelegationToken(token); - assertTrue(renewalTime > 0); + assertThat(renewalTime).isGreaterThan(0); // Check if token is of right kind and renewer is running om instance assertNotNull(token); @@ -358,13 +358,12 @@ public void testDelegationToken(boolean useIp) throws Exception { }); // Case 3: Test Client can authenticate using token. - assertFalse(logs.getOutput().contains( - "Auth successful for " + username + " (auth:TOKEN)")); + assertThat(logs.getOutput()).doesNotContain( + "Auth successful for " + username + " (auth:TOKEN)"); OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND, () -> omClient.deleteVolume("vol1")); - assertTrue(logs.getOutput().contains("Auth successful for " - + username + " (auth:TOKEN)"), - "Log file doesn't contain successful auth for user " + username); + assertThat(logs.getOutput()) + .contains("Auth successful for " + username + " (auth:TOKEN)"); // Case 4: Test failure of token renewal. // Call to renewDelegationToken will fail but it will confirm that @@ -374,8 +373,8 @@ public void testDelegationToken(boolean useIp) throws Exception { OMException ex = assertThrows(OMException.class, () -> omClient.renewDelegationToken(token)); assertEquals(INVALID_AUTH_METHOD, ex.getResult()); - assertTrue(logs.getOutput().contains( - "Auth successful for " + username + " (auth:TOKEN)")); + assertThat(logs.getOutput()).contains( + "Auth successful for " + username + " (auth:TOKEN)"); omLogs.clearOutput(); //testUser.setAuthenticationMethod(AuthMethod.KERBEROS); omClient.close(); @@ -391,7 +390,7 @@ public void testDelegationToken(boolean useIp) throws Exception { // Wait for client to timeout Thread.sleep(CLIENT_TIMEOUT); - assertFalse(logs.getOutput().contains("Auth failed for")); + assertThat(logs.getOutput()).doesNotContain("Auth failed for"); // Case 6: Test failure of token cancellation. 
// Get Om client, this time authentication using Token will fail as @@ -402,8 +401,8 @@ public void testDelegationToken(boolean useIp) throws Exception { ex = assertThrows(OMException.class, () -> omClient.cancelDelegationToken(token)); assertEquals(TOKEN_ERROR_OTHER, ex.getResult()); - assertTrue(ex.getMessage().contains("Cancel delegation token failed")); - assertTrue(logs.getOutput().contains("Auth failed for")); + assertThat(ex.getMessage()).contains("Cancel delegation token failed"); + assertThat(logs.getOutput()).contains("Auth failed for"); } finally { om.stop(); om.join(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index b8cf7906c0a..74d52c4a945 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -109,8 +109,8 @@ public void testStartMultipleDatanodes() throws Exception { } @Test - public void testContainerRandomPort() throws IOException { - OzoneConfiguration ozoneConf = SCMTestUtils.getConf(); + void testContainerRandomPort(@TempDir File tempDir) throws IOException { + OzoneConfiguration ozoneConf = SCMTestUtils.getConf(tempDir); // Each instance of SM will create an ozone container // that bounds to a random port. @@ -233,7 +233,6 @@ public void testDNstartAfterSCM() throws Exception { EndpointStateMachine.EndPointStates.GETVERSION, endpoint.getState()); } - Thread.sleep(1000); } // DN should successfully register with the SCM after SCM is restarted. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java index a3e0be5a85d..0c51ba41311 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java @@ -22,19 +22,19 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ozone.test.GenericTestUtils; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.api.Assertions; /** * This class tests MiniOzoneHAClusterImpl. @@ -44,28 +44,22 @@ public class TestMiniOzoneOMHACluster { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private int numOfOMs = 3; /** * Create a MiniOzoneHAClusterImpl for testing. 
* - * @throws IOException + * @throws Exception */ @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "omServiceId1"; conf.setBoolean(OZONE_ACL_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); @@ -91,9 +85,8 @@ public void testGetOMLeader() throws InterruptedException, TimeoutException { ozoneManager.set(om); return om != null; }, 100, 120000); - Assertions.assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: " + assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: " + "no leader or more than one leader."); - Assertions.assertTrue(ozoneManager.get().isLeaderReady(), - "Should have gotten the leader!"); + assertTrue(ozoneManager.get().isLeaderReady(), "Should have gotten the leader!"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index cec90067dac..852f351ee25 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; @@ -31,7 +32,6 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,9 +42,7 @@ import javax.ws.rs.core.UriInfo; import java.io.IOException; import java.io.ByteArrayInputStream; -import java.security.SecureRandom; import java.util.ArrayList; -import java.util.UUID; import java.util.List; import java.util.Base64; import java.util.concurrent.TimeoutException; @@ -53,6 +51,7 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -64,8 +63,6 @@ public class TestMultipartObjectGet { public static final Logger LOG = LoggerFactory.getLogger( TestMultipartObjectGet.class); private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; private static String omServiceId; private static String scmServiceId; private static final String BUCKET = OzoneConsts.BUCKET; @@ -80,8 +77,6 @@ public class TestMultipartObjectGet { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test"; scmServiceId = "scm-service-test"; @@ -89,13 +84,13 @@ public static void init() throws Exception { client = cluster.newClient(); client.getObjectStore().createS3Bucket(BUCKET); - headers = Mockito.mock(HttpHeaders.class); + headers = mock(HttpHeaders.class); 
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( "STANDARD"); - context = Mockito.mock(ContainerRequestContext.class); - Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); - Mockito.when(context.getUriInfo().getQueryParameters()) + context = mock(ContainerRequestContext.class); + when(context.getUriInfo()).thenReturn(mock(UriInfo.class)); + when(context.getUriInfo().getQueryParameters()) .thenReturn(new MultivaluedHashMap<>()); REST.setHeaders(headers); @@ -109,10 +104,8 @@ private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setScmId(scmId) .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); @@ -217,8 +210,7 @@ public void testMultipart() throws Exception { private static String generateRandomContent(int sizeInMB) { int bytesToGenerate = sizeInMB * 1024 * 1024; - byte[] randomBytes = new byte[bytesToGenerate]; - new SecureRandom().nextBytes(randomBytes); + byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate); return Base64.getEncoder().encodeToString(randomBytes); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 6bc92418a2a..1be5b64ac87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -180,8 +180,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.slf4j.event.Level.INFO; @@ -624,15 +624,11 @@ void testAccessControlExceptionOnClient() throws Exception { new OzoneManagerProtocolClientSideTranslatorPB( OmTransportFactory.create(conf, ugi, null), ClientId.randomId().toString()); - try { - secureClient.createVolume( - new OmVolumeArgs.Builder().setVolume("vol1") - .setOwnerName("owner1") - .setAdminName("admin") - .build()); - } catch (IOException ex) { - fail("Secure client should be able to create volume."); - } + secureClient.createVolume( + new OmVolumeArgs.Builder().setVolume("vol1") + .setOwnerName("owner1") + .setAdminName("admin") + .build()); ugi = UserGroupInformation.createUserForTesting( "testuser1", new String[] {"test"}); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index 5e3c3ab5a75..f2a079ca80c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -16,7 +16,8 @@ */ package org.apache.hadoop.ozone.client; -import static org.junit.jupiter.api.Assertions.fail; +import static 
org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.security.AccessControlException; @@ -25,27 +26,20 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; -import java.util.UUID; /** * Test implementation for OzoneClientFactory. */ public class TestOzoneClientFactory { - private static String scmId = UUID.randomUUID().toString(); - private static String clusterId = UUID.randomUUID().toString(); - @Test public void testRemoteException() { OzoneConfiguration conf = new OzoneConfiguration(); - - try { + Exception e = assertThrows(Exception.class, () -> { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .setTotalPipelineNumLimit(10) - .setScmId(scmId) - .setClusterId(clusterId) .build(); String omPort = cluster.getOzoneManager().getRpcPort(); @@ -59,17 +53,14 @@ public void testRemoteException() { public Void run() throws IOException { conf.set("ozone.security.enabled", "true"); try (OzoneClient ozoneClient = - OzoneClientFactory.getRpcClient("localhost", - Integer.parseInt(omPort), conf)) { + OzoneClientFactory.getRpcClient("localhost", Integer.parseInt(omPort), conf)) { ozoneClient.getObjectStore().listVolumes("/"); } return null; } }); - fail("Should throw exception here"); - } catch (IOException | InterruptedException e) { - assert e instanceof AccessControlException; - } + }); + assertInstanceOf(AccessControlException.class, e); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 3478489edd6..0b0b2586c9e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -16,13 +16,13 @@ */ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.BucketArgs; @@ -120,11 +121,16 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { TimeUnit.SECONDS); conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, zeroCopyEnabled); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10).setBlockSize(blockSize) - 
.setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES).build(); + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) + .setTotalPipelineNumLimit(10).build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index ce89e679df4..e7c8be170ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -17,10 +17,10 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.protocolPB. StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -100,14 +101,16 @@ private void startCluster(OzoneConfiguration conf) throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + conf.setQuietMode(false); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); // the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index d3caf623873..8bb791bb103 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -17,9 +17,9 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import 
org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -92,17 +93,19 @@ public static void init() throws Exception { conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .setDataStreamBufferFlushSize(maxFlushSize) .setDataStreamMinPacketSize(chunkSize) - .setDataStreamStreamWindowSize(5 * chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 74686d363c8..1e9cefbaa48 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -24,12 +24,12 @@ import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; @@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientMetrics; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; @@ -105,14 +106,16 @@ static MiniOzoneCluster createCluster() throws IOException, ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); conf.setFromObject(ratisClientConfig); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) .setChunkSize(CHUNK_SIZE) .setStreamBufferFlushSize(FLUSH_SIZE) .setStreamBufferMaxSize(MAX_FLUSH_SIZE) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + MiniOzoneCluster cluster = 
MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java index f2a5748bffd..9609dea0481 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java @@ -89,7 +89,7 @@ public static void init() throws Exception { chunkSize = (int) OzoneConsts.MB; blockSize = 4 * chunkSize; - OzoneClientConfig config = new OzoneClientConfig(); + OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); config.setChecksumType(ChecksumType.NONE); conf.setFromObject(config); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 5a2d67960fa..2c11177e5ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -95,6 +95,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -376,7 +377,7 @@ public void testUnhealthyContainer() throws Exception { int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the // in the missing container set and not in the regular set - cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); + cluster.restartHddsDatanode(dn.getDatanodeDetails(), true); // make sure the container state is still marked unhealthy after restart keyValueContainerData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(containerFile); @@ -459,10 +460,7 @@ public void testApplyTransactionFailure() throws Exception { // a pipeline close action try { - xceiverClient.sendCommand(request.build()); - fail("Expected exception not thrown"); - } catch (IOException e) { - // Exception should be thrown + assertThrows(IOException.class, () -> xceiverClient.sendCommand(request.build())); } finally { xceiverClientManager.releaseClient(xceiverClient, false); } @@ -494,7 +492,7 @@ public void testApplyTransactionFailure() throws Exception { @Test @Flaky("HDDS-6115") - public void testApplyTransactionIdempotencyWithClosedContainer() + void testApplyTransactionIdempotencyWithClosedContainer() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) @@ -543,11 +541,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() request.setContainerID(containerID); request.setCloseContainer( ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - try { - xceiverClient.sendCommand(request.build()); - } catch (IOException e) { - fail("Exception should not be 
thrown"); - } + xceiverClient.sendCommand(request.build()); assertSame( TestHelper.getDatanodeService(omKeyLocationInfo, cluster) .getDatanodeStateMachine() @@ -557,8 +551,6 @@ public void testApplyTransactionIdempotencyWithClosedContainer() assertTrue(stateMachine.isStateMachineHealthy()); try { stateMachine.takeSnapshot(); - } catch (IOException ioe) { - fail("Exception should not be thrown"); } finally { xceiverClientManager.releaseClient(xceiverClient, false); } @@ -585,7 +577,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer() // not be marked unhealthy and pipeline should not fail if container gets // closed here. @Test - public void testWriteStateMachineDataIdempotencyWithClosedContainer() + void testWriteStateMachineDataIdempotencyWithClosedContainer() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) @@ -699,11 +691,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() .getContainerState(), ContainerProtos.ContainerDataProto.State.CLOSED); assertTrue(stateMachine.isStateMachineHealthy()); - try { - stateMachine.takeSnapshot(); - } catch (IOException ioe) { - fail("Exception should not be thrown"); - } + stateMachine.takeSnapshot(); final FileInfo latestSnapshot = getSnapshotFileInfo(storage); assertNotEquals(snapshot.getPath(), latestSnapshot.getPath()); @@ -715,43 +703,37 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() } @Test - public void testContainerStateMachineSingleFailureRetry() + void testContainerStateMachineSingleFailureRetry() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis1", 1024, - ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>()); + try (OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis1", 1024, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>())) { - key.write("ratis".getBytes(UTF_8)); - key.flush(); - key.write("ratis".getBytes(UTF_8)); - key.write("ratis".getBytes(UTF_8)); - - KeyOutputStream groupOutputStream = (KeyOutputStream) key. - getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - assertEquals(1, locationInfoList.size()); + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.write("ratis".getBytes(UTF_8)); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + KeyOutputStream groupOutputStream = (KeyOutputStream) key. + getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + assertEquals(1, locationInfoList.size()); - induceFollowerFailure(omKeyLocationInfo, 2); + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - try { + induceFollowerFailure(omKeyLocationInfo, 2); key.flush(); key.write("ratis".getBytes(UTF_8)); key.flush(); - key.close(); - } catch (Exception ioe) { - // Should not fail.. 
- fail("Exception " + ioe.getMessage()); } + validateData("ratis1", 2, "ratisratisratisratis"); } @Test - public void testContainerStateMachineDualFailureRetry() + void testContainerStateMachineDualFailureRetry() throws Exception { OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) @@ -774,15 +756,10 @@ public void testContainerStateMachineDualFailureRetry() induceFollowerFailure(omKeyLocationInfo, 1); - try { - key.flush(); - key.write("ratis".getBytes(UTF_8)); - key.flush(); - key.close(); - } catch (Exception ioe) { - // Should not fail.. - fail("Exception " + ioe.getMessage()); - } + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.close(); validateData("ratis1", 2, "ratisratisratisratis"); } @@ -819,31 +796,24 @@ private void induceFollowerFailure(OmKeyLocationInfo omKeyLocationInfo, } } - private void validateData(String key, int locationCount, String payload) { + private void validateData(String key, int locationCount, String payload) throws Exception { OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(key) .build(); - OmKeyInfo keyInfo = null; - try { - keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs); - - assertEquals(locationCount, - keyInfo.getLatestVersionLocations().getLocationListCount()); - byte[] buffer = new byte[1024]; - try (OzoneInputStream o = objectStore.getVolume(volumeName) - .getBucket(bucketName).readKey(key)) { - o.read(buffer, 0, 1024); - } - int end = ArrayUtils.indexOf(buffer, (byte) 0); - String response = new String(buffer, 0, - end, - StandardCharsets.UTF_8); - assertEquals(payload, response); - } catch (IOException e) { - fail("Exception not expected " + e.getMessage()); + OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs); + + assertEquals(locationCount, + keyInfo.getLatestVersionLocations().getLocationListCount()); + byte[] buffer = new byte[1024]; + try (OzoneInputStream o = objectStore.getVolume(volumeName) + .getBucket(bucketName).readKey(key)) { + o.read(buffer, 0, 1024); } + int end = ArrayUtils.indexOf(buffer, (byte) 0); + String response = new String(buffer, 0, end, StandardCharsets.UTF_8); + assertEquals(payload, response); } static FileInfo getSnapshotFileInfo(SimpleStateMachineStorage storage) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 32fc9ba5c93..23ab89b80c6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -17,16 +17,17 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.hdds.utils.IOUtils; +import 
org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -108,13 +109,16 @@ public void setup() throws Exception { conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .applyTo(conf); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index ab2fbeadb61..97a3047bfdb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -17,14 +17,15 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -112,18 +113,20 @@ public void setup() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setDataStreamMinPacketSize(1024) + .setBlockSize(BLOCK_SIZE) + .setChunkSize(CHUNK_SIZE) + .setStreamBufferFlushSize(FLUSH_SIZE) + .setStreamBufferMaxSize(MAX_FLUSH_SIZE) + .applyTo(conf); + conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .setHbInterval(200) - .setDataStreamMinPacketSize(1024) - .setBlockSize(BLOCK_SIZE) - .setChunkSize(CHUNK_SIZE) - .setStreamBufferFlushSize(FLUSH_SIZE) - .setStreamBufferMaxSize(MAX_FLUSH_SIZE) - .setStreamBufferSizeUnit(StorageUnit.BYTES) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index 0363d7aef14..fa50dac64f7 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -71,10 +71,9 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.BeforeAll; @@ -201,7 +200,7 @@ public static void shutdown() { * data is not deleted from any of the nodes which have the closed replica. */ @Test - public void testDeleteKeyWithInAdequateDN() throws Exception { + void testDeleteKeyWithInAdequateDN() throws Exception { String keyName = "ratis"; OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) @@ -289,14 +288,11 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { deleteKey("ratis"); // make sure the chunk was never deleted on the leader even though // deleteBlock handler is invoked - try { - for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { - keyValueHandler.getChunkManager() - .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo), - null); - } - } catch (IOException ioe) { - fail("Exception should not be thrown."); + + for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { + keyValueHandler.getChunkManager() + .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo), + null); } long numReadStateMachineOps = stateMachine.getMetrics().getNumReadStateMachineOps(); @@ -319,16 +315,14 @@ public void testDeleteKeyWithInAdequateDN() throws Exception { .getDispatcher() .getHandler(ContainerProtos.ContainerType.KeyValueContainer); // make sure the chunk is now deleted on the all dns - try { + KeyValueHandler finalKeyValueHandler = keyValueHandler; + StorageContainerException e = assertThrows(StorageContainerException.class, () -> { for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { - keyValueHandler.getChunkManager().readChunk(container, blockID, - ChunkInfo.getFromProtoBuf(chunkInfo), null); + finalKeyValueHandler.getChunkManager().readChunk(container, blockID, + ChunkInfo.getFromProtoBuf(chunkInfo), null); } - fail("Expected exception is not thrown"); - } catch (IOException ioe) { - StorageContainerException e = assertInstanceOf(StorageContainerException.class, ioe); - assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult()); - } + }); + assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult()); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java index c689a692ae7..47891586827 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java @@ -85,7 +85,7 @@ public static void init() 
throws Exception { chunkSize = (int) OzoneConsts.MB; blockSize = 4 * chunkSize; - OzoneClientConfig config = new OzoneClientConfig(); + OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); config.setChecksumType(ChecksumType.NONE); conf.setFromObject(config); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 41438996c27..fadc06bd57b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -17,13 +17,13 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; @@ -35,6 +35,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -129,14 +130,18 @@ private void init() throws Exception { StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) - .setChunkSize(chunkSize) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) + .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES).build(); + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .setTotalPipelineNumLimit(15) + .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index ab09bc24330..919654d82a9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -34,7 +34,6 @@ import java.util.UUID; import com.google.common.cache.Cache; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; @@ -44,11 +43,13 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import 
org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; @@ -78,6 +79,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -107,8 +109,6 @@ class TestOzoneAtRestEncryption { private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static final String SCM_ID = UUID.randomUUID().toString(); - private static final String CLUSTER_ID = UUID.randomUUID().toString(); private static File testDir; private static OzoneConfiguration conf; private static final String TEST_KEY = "key1"; @@ -141,13 +141,14 @@ static void init() throws Exception { conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); CertificateClientTestImpl certificateClientTest = new CertificateClientTestImpl(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .setScmId(SCM_ID) - .setClusterId(CLUSTER_ID) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) .setChunkSize(CHUNK_SIZE) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) .setCertificateClient(certificateClientTest) .setSecretKeyClient(new SecretKeyTestClient()) .build(); @@ -165,7 +166,6 @@ static void init() throws Exception { TestOzoneRpcClient.setStorageContainerLocationClient( storageContainerLocationClient); TestOzoneRpcClient.setStore(store); - TestOzoneRpcClient.setClusterId(CLUSTER_ID); // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); @@ -229,7 +229,7 @@ void testLinkEncryptedBuckets(BucketLayout bucketLayout) throws Exception { static void createAndVerifyStreamKeyData(OzoneBucket bucket) throws Exception { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String keyName = UUID.randomUUID().toString(); String value = "sample value"; try (OzoneDataStreamOutput out = bucket.createStreamKey(keyName, @@ -242,7 +242,7 @@ static void createAndVerifyStreamKeyData(OzoneBucket bucket) } static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String keyName = UUID.randomUUID().toString(); String value = "sample value"; try (OzoneOutputStream out = bucket.createKey(keyName, @@ -318,7 +318,7 @@ void testKeyWithEncryptionAndGdpr(BucketLayout bucketLayout) //Step 1 String volumeName = UUID.randomUUID().toString(); String bucketName = 
UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String value = "sample value"; store.createVolume(volumeName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 4867be49066..a89e6176996 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -58,7 +58,6 @@ import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; @@ -83,13 +82,11 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * This test verifies all the S3 multipart client apis - prefix layout. @@ -101,7 +98,6 @@ public class TestOzoneClientMultipartUploadWithFSO { private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; - private static String scmId = UUID.randomUUID().toString(); private String volumeName; private String bucketName; private String keyName; @@ -140,7 +136,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .setTotalPipelineNumLimit(10) - .setScmId(scmId) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); @@ -490,15 +485,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { String part1 = new String(data, UTF_8); sb.append(part1); assertEquals(sb.toString(), new String(fileContent, UTF_8)); - - try { - ozoneOutputStream.close(); - fail("testCommitPartAfterCompleteUpload failed"); - } catch (IOException ex) { - assertInstanceOf(OMException.class, ex); - assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, - ((OMException) ex).getResult()); - } + OzoneOutputStream finalOzoneOutputStream = ozoneOutputStream; + OMException ex = assertThrows(OMException.class, () -> finalOzoneOutputStream.close()); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult()); } @Test @@ -523,15 +512,8 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { // Abort before completing part upload. 
bucket.abortMultipartUpload(keyName, uploadID); - - try { - ozoneOutputStream.close(); - fail("testAbortUploadFailWithInProgressPartUpload failed"); - } catch (IOException ex) { - assertInstanceOf(OMException.class, ex); - assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, - ((OMException) ex).getResult()); - } + OMException ome = assertThrows(OMException.class, () -> ozoneOutputStream.close()); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult()); } @Test @@ -568,8 +550,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { bucket.abortMultipartUpload(keyName, uploadID); String multipartOpenKey = - getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, - metadataMgr); + metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey); OmMultipartKeyInfo omMultipartKeyInfo = @@ -853,8 +834,7 @@ private String verifyUploadedPart(String uploadID, String partName, ozoneManager.getMetadataManager().getBucketTable().get(buckKey); BucketLayout bucketLayout = buckInfo.getBucketLayout(); String multipartOpenKey = - getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, - metadataMgr); + metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName, keyName, uploadID); @@ -881,32 +861,6 @@ private String verifyUploadedPart(String uploadID, String partName, return multipartKey; } - private String getMultipartOpenKey(String multipartUploadID, - String volName, String buckName, String kName, - OMMetadataManager omMetadataManager) throws IOException { - - String fileName = OzoneFSUtils.getFileName(kName); - final long volumeId = omMetadataManager.getVolumeId(volName); - final long bucketId = omMetadataManager.getBucketId(volName, - buckName); - long parentID = getParentID(volName, buckName, kName, - omMetadataManager); - - String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId, - parentID, fileName, multipartUploadID); - - return multipartKey; - } - - private long getParentID(String volName, String buckName, - String kName, OMMetadataManager omMetadataManager) throws IOException { - final long volumeId = omMetadataManager.getVolumeId(volName); - final long bucketId = omMetadataManager.getBucketId(volName, - buckName); - return OMFileRequest.getParentID(volumeId, bucketId, - kName, omMetadataManager); - } - private String initiateMultipartUpload(OzoneBucket oBucket, String kName, ReplicationType replicationType, ReplicationFactor replicationFactor) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index 925cfd9d954..c3e8a8d461b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -20,10 +20,10 @@ import java.io.OutputStream; import java.util.UUID; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -88,7 +89,7 @@ public void init() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; - OzoneClientConfig config = new OzoneClientConfig(); + OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); config.setChecksumType(ChecksumType.NONE); config.setMaxRetryCount(3); conf.setFromObject(config); @@ -98,14 +99,17 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index 5d6b601ad9a..cd99382f300 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -22,10 +22,10 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -53,7 +54,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.ratis.protocol.exceptions.GroupMismatchException; import org.junit.jupiter.api.AfterEach; import 
org.junit.jupiter.api.Assumptions; @@ -108,14 +109,16 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -188,72 +191,63 @@ public void testGroupMismatchExceptionHandling() throws Exception { } @Test - public void testMaxRetriesByOzoneClient() throws Exception { + void testMaxRetriesByOzoneClient() throws Exception { String keyName = getKeyName(); - OzoneOutputStream key = createKey( - keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize); - KeyOutputStream keyOutputStream = - assertInstanceOf(KeyOutputStream.class, key.getOutputStream()); - List entries = keyOutputStream.getStreamEntries(); - assertEquals((MAX_RETRIES + 1), - keyOutputStream.getStreamEntries().size()); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - long containerID; - List containerList = new ArrayList<>(); - for (BlockOutputStreamEntry entry : entries) { - containerID = entry.getBlockID().getContainerID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueOf(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - Assumptions.assumeFalse(containerList.contains(containerID)); - containerList.add(containerID); - xceiverClient.sendCommand(ContainerTestHelper - .getCreateContainerRequest(containerID, pipeline)); - xceiverClientManager.releaseClient(xceiverClient, false); - } - key.write(data1); - OutputStream stream = entries.get(0).getOutputStream(); - BlockOutputStream blockOutputStream = assertInstanceOf(BlockOutputStream.class, stream); - TestHelper.waitForContainerClose(key, cluster); - // Ensure that blocks for the key have been allocated to at least N+1 - // containers so that write request will be tried on N+1 different blocks - // of N+1 different containers and it will finally fail as it will hit - // the max retry count of N. 
- Assumptions.assumeTrue(containerList.size() > MAX_RETRIES, - containerList.size() + " <= " + MAX_RETRIES); - try { + try (OzoneOutputStream key = createKey( + keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize)) { + KeyOutputStream keyOutputStream = + assertInstanceOf(KeyOutputStream.class, key.getOutputStream()); + List entries = keyOutputStream.getStreamEntries(); + assertEquals((MAX_RETRIES + 1), + keyOutputStream.getStreamEntries().size()); + int dataLength = maxFlushSize + 50; + // write data more than 1 chunk + byte[] data1 = + ContainerTestHelper.getFixedLengthString(keyString, dataLength) + .getBytes(UTF_8); + long containerID; + List containerList = new ArrayList<>(); + for (BlockOutputStreamEntry entry : entries) { + containerID = entry.getBlockID().getContainerID(); + ContainerInfo container = + cluster.getStorageContainerManager().getContainerManager() + .getContainer(ContainerID.valueOf(containerID)); + Pipeline pipeline = + cluster.getStorageContainerManager().getPipelineManager() + .getPipeline(container.getPipelineID()); + XceiverClientSpi xceiverClient = + xceiverClientManager.acquireClient(pipeline); + Assumptions.assumeFalse(containerList.contains(containerID)); + containerList.add(containerID); + xceiverClient.sendCommand(ContainerTestHelper + .getCreateContainerRequest(containerID, pipeline)); + xceiverClientManager.releaseClient(xceiverClient, false); + } key.write(data1); - // ensure that write is flushed to dn - key.flush(); - fail("Expected exception not thrown"); - } catch (IOException ioe) { + OutputStream stream = entries.get(0).getOutputStream(); + BlockOutputStream blockOutputStream = assertInstanceOf(BlockOutputStream.class, stream); + TestHelper.waitForContainerClose(key, cluster); + // Ensure that blocks for the key have been allocated to at least N+1 + // containers so that write request will be tried on N+1 different blocks + // of N+1 different containers and it will finally fail as it will hit + // the max retry count of N. + Assumptions.assumeTrue(containerList.size() > MAX_RETRIES, + containerList.size() + " <= " + MAX_RETRIES); + IOException ioe = assertThrows(IOException.class, () -> { + key.write(data1); + // ensure that write is flushed to dn + key.flush(); + }); assertInstanceOf(ContainerNotOpenException.class, HddsClientUtils.checkForException(blockOutputStream.getIoException())); assertThat(ioe.getMessage()).contains( - "Retry request failed. " + - "retries get failed due to exceeded maximum " + - "allowed retries number: " + MAX_RETRIES); - } - try { - key.flush(); - fail("Expected exception not thrown"); - } catch (IOException ioe) { + "Retry request failed. 
" + + "retries get failed due to exceeded maximum " + + "allowed retries number: " + MAX_RETRIES); + + ioe = assertThrows(IOException.class, () -> key.flush()); assertThat(ioe.getMessage()).contains("Stream is closed"); } - try { - key.close(); - } catch (IOException ioe) { - fail("Expected should not be thrown"); - } } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 4e0c9bbf5b1..a87d05321e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -38,7 +38,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec; @@ -49,6 +48,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -62,6 +62,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OmUtils; @@ -84,7 +85,6 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -129,6 +129,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.OmUtils.LOG; import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; @@ -144,11 +145,11 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; +import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static 
org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -193,10 +194,6 @@ public abstract class TestOzoneRpcClientAbstract { private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, READ, ACCESS); - private static String scmId = UUID.randomUUID().toString(); - private static String clusterId; - - /** * Create a MiniOzoneCluster for testing. * @param conf Configurations to start the cluster. @@ -205,14 +202,15 @@ public abstract class TestOzoneRpcClientAbstract { static void startCluster(OzoneConfiguration conf) throws Exception { // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop // for testZReadKeyWithUnhealthyContainerReplica. - clusterId = UUID.randomUUID().toString(); conf.set("ozone.scm.stale.node.interval", "10s"); + + ClientConfigForTesting.newBuilder(StorageUnit.MB) + .setDataStreamMinPacketSize(1) + .applyTo(conf); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) .setTotalPipelineNumLimit(10) - .setScmId(scmId) - .setClusterId(clusterId) - .setDataStreamMinPacketSize(1) // 1MB .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); @@ -266,10 +264,6 @@ public static ObjectStore getStore() { return TestOzoneRpcClientAbstract.store; } - public static void setClusterId(String clusterId) { - TestOzoneRpcClientAbstract.clusterId = clusterId; - } - public static OzoneClient getClient() { return TestOzoneRpcClientAbstract.ozClient; } @@ -583,7 +577,7 @@ public void testCreateBucketWithMetadata() @Test public void testCreateBucket() throws IOException { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -598,7 +592,7 @@ public void testCreateBucket() @Test public void testCreateS3Bucket() throws IOException { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String bucketName = UUID.randomUUID().toString(); store.createS3Bucket(bucketName); OzoneBucket bucket = store.getS3Bucket(bucketName); @@ -610,7 +604,7 @@ public void testCreateS3Bucket() @Test public void testDeleteS3Bucket() throws Exception { - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String bucketName = UUID.randomUUID().toString(); store.createS3Bucket(bucketName); OzoneBucket bucket = store.getS3Bucket(bucketName); @@ -1005,7 +999,7 @@ public void testPutKeyWithReplicationConfig(String replicationValue, public void testPutKey() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String value = "sample value"; store.createVolume(volumeName); @@ -1545,7 +1539,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { public void testPutKeyRatisOneNode() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String value = "sample value"; store.createVolume(volumeName); @@ -1580,7 +1574,7 @@ public void testPutKeyRatisOneNode() throws IOException { public void 
testPutKeyRatisThreeNodes() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); String value = "sample value"; store.createVolume(volumeName); @@ -1617,7 +1611,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, InterruptedException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); @@ -1911,7 +1905,7 @@ public void testReadKeyWithCorruptedData() throws IOException { // Make this executed at last, for it has some side effect to other UTs @Test @Flaky("HDDS-6151") - public void testZReadKeyWithUnhealthyContainerReplica() throws Exception { + void testZReadKeyWithUnhealthyContainerReplica() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -1994,16 +1988,12 @@ public void testZReadKeyWithUnhealthyContainerReplica() throws Exception { }, 1000, 10000); // Try reading keyName2 - try { - GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG); - try (OzoneInputStream is = bucket.readKey(keyName2)) { - byte[] content = new byte[100]; - is.read(content); - String retValue = new String(content, UTF_8); - assertEquals(value, retValue.trim()); - } - } catch (IOException e) { - fail("Reading unhealthy replica should succeed."); + GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG); + try (OzoneInputStream is = bucket.readKey(keyName2)) { + byte[] content = new byte[100]; + is.read(content); + String retValue = new String(content, UTF_8); + assertEquals(value, retValue.trim()); } } @@ -2012,7 +2002,7 @@ public void testZReadKeyWithUnhealthyContainerReplica() throws Exception { * @throws IOException */ @Test - public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { + void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -2061,8 +2051,6 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { byte[] b = new byte[data.length]; is.read(b); assertArrayEquals(b, data); - } catch (OzoneChecksumException e) { - fail("Reading corrupted data should not fail."); } corruptData(containerList.get(1), key); // Try reading the key. Read will fail on the first node and will eventually @@ -2071,8 +2059,6 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { byte[] b = new byte[data.length]; is.read(b); assertArrayEquals(b, data); - } catch (OzoneChecksumException e) { - fail("Reading corrupted data should not fail."); } corruptData(containerList.get(2), key); // Try reading the key. 
Read will fail here as all the replicas are corrupt @@ -2114,7 +2100,7 @@ private void corruptData(Container container, OzoneKey key) String containreBaseDir = container.getContainerData().getVolume().getHddsRootDir().getPath(); File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, clusterId, containerID); + .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID); byte[] corruptData = "corrupted data".getBytes(UTF_8); // Corrupt the contents of chunk files for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { @@ -2808,13 +2794,10 @@ public void testMultipartUploadWithACL() throws Exception { String keyName2 = UUID.randomUUID().toString(); OzoneBucket bucket2 = client.getObjectStore().getVolume(volumeName) .getBucket(bucketName); - try { - initiateMultipartUpload(bucket2, keyName2, anyReplication()); - fail("User without permission should fail"); - } catch (Exception e) { - OMException ome = assertInstanceOf(OMException.class, e); - assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult()); - } + OMException ome = + assertThrows(OMException.class, () -> initiateMultipartUpload(bucket2, keyName2, anyReplication()), + "User without permission should fail"); + assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult()); // Add create permission for user, and try multi-upload init again OzoneAcl acl7 = new OzoneAcl(USER, userName, ACLType.CREATE, DEFAULT); @@ -2843,12 +2826,12 @@ public void testMultipartUploadWithACL() throws Exception { completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); // User without permission cannot read multi-uploaded object - try (OzoneInputStream ignored = bucket2.readKey(keyName)) { - fail("User without permission should fail"); - } catch (Exception e) { - OMException ome = assertInstanceOf(OMException.class, e); - assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult()); - } + OMException ex = assertThrows(OMException.class, () -> { + try (OzoneInputStream ignored = bucket2.readKey(keyName)) { + LOG.error("User without permission should fail"); + } + }, "User without permission should fail"); + assertEquals(ResultCodes.PERMISSION_DENIED, ex.getResult()); } } @@ -3052,14 +3035,8 @@ void testAbortUploadFailWithInProgressPartUpload() throws Exception { // Abort before completing part upload. 
bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID()); - - try { - ozoneOutputStream.close(); - fail("testAbortUploadFailWithInProgressPartUpload failed"); - } catch (IOException ex) { - OMException ome = assertInstanceOf(OMException.class, ex); - assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult()); - } + OMException ome = assertThrows(OMException.class, () -> ozoneOutputStream.close()); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult()); } @Test @@ -3114,14 +3091,8 @@ void testCommitPartAfterCompleteUpload() throws Exception { String part1 = new String(data, UTF_8); sb.append(part1); assertEquals(sb.toString(), new String(fileContent, UTF_8)); - - try { - ozoneOutputStream.close(); - fail("testCommitPartAfterCompleteUpload failed"); - } catch (IOException ex) { - OMException ome = assertInstanceOf(OMException.class, ex); - assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult()); - } + OMException ex = assertThrows(OMException.class, ozoneOutputStream::close); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index a830c8c739d..e373b06d950 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -51,7 +51,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; @@ -94,8 +93,6 @@ public class TestOzoneRpcClientForAclAuditLog { private static ObjectStore store = null; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static String scmId = UUID.randomUUID().toString(); - /** * Create a MiniOzoneCluster for testing. 
@@ -127,7 +124,6 @@ public static void init() throws Exception { private static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setScmId(scmId) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 253193c92e6..ffd80f359ff 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; @@ -135,8 +134,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); assertArrayEquals(b, value.getBytes(UTF_8)); - } catch (OzoneChecksumException e) { - fail("Read key should succeed"); } // read file with topology aware read enabled @@ -144,8 +141,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); assertArrayEquals(b, value.getBytes(UTF_8)); - } catch (OzoneChecksumException e) { - fail("Read file should succeed"); } // read key with topology aware read disabled @@ -159,8 +154,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); assertArrayEquals(b, value.getBytes(UTF_8)); - } catch (OzoneChecksumException e) { - fail("Read key should succeed"); } // read file with topology aware read disabled @@ -168,8 +161,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); assertArrayEquals(b, value.getBytes(UTF_8)); - } catch (OzoneChecksumException e) { - fail("Read file should succeed"); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 3301320c005..15af5a2d8e0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -95,11 +95,12 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; -import static org.junit.Assert.assertThrows; +import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static 
org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -114,8 +115,6 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient { private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static final String SCM_ID = UUID.randomUUID().toString(); - private static final String CLUSTER_ID = UUID.randomUUID().toString(); private static File testDir; private static OzoneConfiguration conf; @@ -149,8 +148,6 @@ public static void init() throws Exception { conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) - .setScmId(SCM_ID) - .setClusterId(CLUSTER_ID) .setCertificateClient(certificateClientTest) .setSecretKeyClient(new SecretKeyTestClient()) .build(); @@ -167,7 +164,6 @@ public static void init() throws Exception { TestOzoneRpcClient.setStorageContainerLocationClient( storageContainerLocationClient); TestOzoneRpcClient.setStore(store); - TestOzoneRpcClient.setClusterId(CLUSTER_ID); } /** @@ -187,7 +183,7 @@ private void testPutKeySuccessWithBlockTokenWithBucketLayout( BucketLayout bucketLayout) throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - Instant testStartTime = Instant.now(); + Instant testStartTime = getTestStartTime(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); String value = "sample value"; @@ -426,7 +422,7 @@ public void testS3Auth() throws Exception { // Add secret to S3Secret table. s3SecretManager.storeSecret(accessKey, - new S3SecretValue(accessKey, secret)); + S3SecretValue.of(accessKey, secret)); OMRequest writeRequest = OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) @@ -475,7 +471,7 @@ public void testS3Auth() throws Exception { // Override secret to S3Secret store with some dummy value s3SecretManager - .storeSecret(accessKey, new S3SecretValue(accessKey, "dummy")); + .storeSecret(accessKey, S3SecretValue.of(accessKey, "dummy")); // Write request with invalid credentials. 
omResponse = cluster.getOzoneManager().getOmServerProtocol() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index d03c57bf4e4..f8e9b552e3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -22,16 +22,16 @@ import java.time.Duration; import java.util.ArrayList; import java.util.List; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; @@ -48,6 +48,7 @@ import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -69,7 +70,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.ratis.protocol.exceptions.GroupMismatchException; import org.junit.jupiter.api.AfterEach; @@ -140,13 +141,16 @@ public void init() throws Exception { conf.setFromObject(raftClientConfig); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(9) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(9) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 60000); @@ -275,25 +279,20 @@ public void testWatchForCommitForRetryfailure() throws Exception { cluster.getStorageContainerManager() .getPipelineManager().closePipeline(pipeline, false); // again write data with more than max buffer limit. This wi - try { - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. 
- xceiverClient - .watchForCommit(index + new Random().nextInt(100) + 10); - fail("expected exception not thrown"); - } catch (Exception e) { - assertInstanceOf(ExecutionException.class, e); - // since the timeout value is quite long, the watch request will either - // fail with NotReplicated exceptio, RetryFailureException or - // RuntimeException - assertFalse(HddsClientUtils - .checkForException(e) instanceof TimeoutException); - // client should not attempt to watch with - // MAJORITY_COMMITTED replication level, except the grpc IO issue - if (!logCapturer.getOutput().contains("Connection refused")) { - assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED"); - } + // just watch for a log index which is not updated in the commitInfo Map + // as well as there is no logIndex generated in Ratis. + // The basic idea here is just to test if it throws an exception. + ExecutionException e = assertThrows(ExecutionException.class, + () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10)); + // since the timeout value is quite long, the watch request will either + // fail with NotReplicated exception, RetryFailureException or + // RuntimeException + assertFalse(HddsClientUtils + .checkForException(e) instanceof TimeoutException); + // client should not attempt to watch with + // MAJORITY_COMMITTED replication level, except the grpc IO issue + if (!logCapturer.getOutput().contains("Connection refused")) { + assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED"); + } clientManager.releaseClient(xceiverClient, false); } @@ -368,17 +367,13 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { List pipelineList = new ArrayList<>(); pipelineList.add(pipeline); TestHelper.waitForPipelineClose(pipelineList, cluster); - try { - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. - xceiverClient - .watchForCommit(reply.getLogIndex() + - new Random().nextInt(100) + 10); - fail("Expected exception not thrown"); - } catch (Exception e) { - assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); - } + // just watch for a log index which is not updated in the commitInfo Map + // as well as there is no logIndex generated in Ratis. + // The basic idea here is just to test if it throws an exception. 
+ Exception e = + assertThrows(Exception.class, + () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.nextInt(0, 100) + 10)); + assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); clientManager.releaseClient(xceiverClient, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 22ad4f036cf..9f5d04c56f9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -20,11 +20,12 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; @@ -45,7 +46,7 @@ protected static MiniOzoneCluster newCluster( ContainerLayoutVersion containerLayout) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - OzoneClientConfig config = new OzoneClientConfig(); + OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); config.setBytesPerChecksum(BYTES_PER_CHECKSUM); conf.setFromObject(config); @@ -63,14 +64,16 @@ protected static MiniOzoneCluster newCluster( repConf.setInterval(Duration.ofSeconds(1)); conf.setFromObject(repConf); - return MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(5) + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) .setChunkSize(CHUNK_SIZE) .setStreamBufferFlushSize(FLUSH_SIZE) .setStreamBufferMaxSize(MAX_FLUSH_SIZE) - .setStreamBufferSizeUnit(StorageUnit.BYTES) + .applyTo(conf); + + return MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .setTotalPipelineNumLimit(5) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java index 28d19d0be87..810a5725492 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java @@ -28,7 +28,7 @@ import static org.apache.hadoop.ozone.container.TestHelper.waitForContainerClose; import static org.apache.hadoop.ozone.container.TestHelper.waitForReplicaCount; import static org.apache.ozone.test.GenericTestUtils.setLogLevel; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertNotNull; import java.io.IOException; @@ -108,7 +108,7 @@ void testContainerReplication( createTestData(client); List keyLocations = lookupKey(cluster); - assertFalse(keyLocations.isEmpty()); + 
assertThat(keyLocations).isNotEmpty(); OmKeyLocationInfo keyLocation = keyLocations.get(0); long containerID = keyLocation.getContainerID(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index b50f2ac8d68..e045b48bda9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -16,11 +16,11 @@ */ package org.apache.hadoop.ozone.container; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -52,7 +53,6 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,6 +72,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doAnswer; /** * Tests the EC recovery and over replication processing. 
@@ -138,11 +140,17 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10).setBlockSize(blockSize) - .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize) + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES).build(); + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) + .setTotalPipelineNumLimit(10) + .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); @@ -308,7 +316,7 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception { .mockFieldReflection(handler, "coordinator"); - Mockito.doAnswer(invocation -> { + doAnswer(invocation -> { GenericTestUtils.waitFor(() -> dn.getDatanodeStateMachine() .getContainer() @@ -320,8 +328,8 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception { reconstructedDN.set(dn); invocation.callRealMethod(); return null; - }).when(coordinator).reconstructECBlockGroup(Mockito.any(), Mockito.any(), - Mockito.any(), Mockito.any()); + }).when(coordinator).reconstructECBlockGroup(any(), any(), + any(), any()); } // Shutting down DN triggers close pipeline and close container. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java index 665e07630d9..d6bb591979f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java @@ -65,7 +65,9 @@ import org.slf4j.LoggerFactory; import static java.util.stream.Collectors.toList; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -205,7 +207,7 @@ public static void waitForContainerClose(OzoneOutputStream outputStream, containerIdList.add(id); } } - assertFalse(containerIdList.isEmpty()); + assertThat(containerIdList).isNotEmpty(); waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); } @@ -223,7 +225,7 @@ public static void waitForContainerClose(OzoneDataStreamOutput outputStream, containerIdList.add(id); } } - assertFalse(containerIdList.isEmpty()); + assertThat(containerIdList).isNotEmpty(); waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); } @@ -241,7 +243,7 @@ public static void waitForPipelineClose(OzoneOutputStream outputStream, containerIdList.add(id); } } - assertFalse(containerIdList.isEmpty()); + assertThat(containerIdList).isNotEmpty(); waitForPipelineClose(cluster, waitForContainerCreation, containerIdList.toArray(new Long[0])); } @@ -296,7 +298,7 @@ public static void waitForPipelineClose(List pipelineList, XceiverServerSpi server = cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn)) 
.getDatanodeStateMachine().getContainer().getWriteChannel(); - assertTrue(server instanceof XceiverServerRatis); + assertInstanceOf(XceiverServerRatis.class, server); GenericTestUtils.waitFor(() -> !server.isExist(pipelineId), 100, 30_000); } @@ -313,7 +315,7 @@ public static void createPipelineOnDatanode(Pipeline pipeline, cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn)) .getDatanodeStateMachine().getContainer() .getWriteChannel(); - assertTrue(server instanceof XceiverServerRatis); + assertInstanceOf(XceiverServerRatis.class, server); try { server.addGroup(pipeline.getId().getProtobuf(), Collections. unmodifiableList(pipeline.getNodes())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5dec1799b40..d5564ac2315 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -72,7 +72,6 @@ import org.apache.ozone.test.GenericTestUtils.LogCapturer; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -95,14 +94,20 @@ import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds - .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.ozone - .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests for Block deletion. @@ -246,23 +251,23 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { } }, 1000, 10000); // No containers with deleted blocks - Assertions.assertTrue(containerIdsWithDeletedBlocks.isEmpty()); + assertThat(containerIdsWithDeletedBlocks).isEmpty(); // Delete transactionIds for the containers should be 0. // NOTE: this test assumes that all the container is KetValueContainer. If // other container types is going to be added, this test should be checked. 
matchContainerTransactionIds(); - Assertions.assertEquals(0L, + assertEquals(0L, metrics.getNumBlockDeletionTransactionCreated()); writeClient.deleteKey(keyArgs); Thread.sleep(5000); // The blocks should not be deleted in the DN as the container is open - Throwable e = Assertions.assertThrows(AssertionError.class, + Throwable e = assertThrows(AssertionError.class, () -> verifyBlocksDeleted(omKeyLocationInfoGroupList)); - Assertions.assertTrue( + assertTrue( e.getMessage().startsWith("expected: but was:")); - Assertions.assertEquals(0L, metrics.getNumBlockDeletionTransactionSent()); + assertEquals(0L, metrics.getNumBlockDeletionTransactionSent()); // close the containers which hold the blocks for the key OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); @@ -291,7 +296,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { }, 2000, 30000); // Few containers with deleted blocks - Assertions.assertFalse(containerIdsWithDeletedBlocks.isEmpty()); + assertThat(containerIdsWithDeletedBlocks).isNotEmpty(); // Containers in the DN and SCM should have same delete transactionIds matchContainerTransactionIds(); @@ -312,13 +317,13 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { cluster.restartHddsDatanode(0, true); matchContainerTransactionIds(); - Assertions.assertEquals(metrics.getNumBlockDeletionTransactionCreated(), + assertEquals(metrics.getNumBlockDeletionTransactionCreated(), metrics.getNumBlockDeletionTransactionCompleted()); - Assertions.assertTrue(metrics.getNumBlockDeletionCommandSent() >= - metrics.getNumBlockDeletionCommandSuccess() + + assertThat(metrics.getNumBlockDeletionCommandSent()) + .isGreaterThanOrEqualTo(metrics.getNumBlockDeletionCommandSuccess() + metrics.getBNumBlockDeletionCommandFailure()); - Assertions.assertTrue(metrics.getNumBlockDeletionTransactionSent() >= - metrics.getNumBlockDeletionTransactionFailure() + + assertThat(metrics.getNumBlockDeletionTransactionSent()) + .isGreaterThanOrEqualTo(metrics.getNumBlockDeletionTransactionFailure() + metrics.getNumBlockDeletionTransactionSuccess()); LOG.info(metrics.toString()); @@ -326,8 +331,8 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { for (int i = 5; i >= 0; i--) { if (logCapturer.getOutput().contains("1(" + i + ")")) { for (int j = 0; j <= i; j++) { - Assertions.assertTrue(logCapturer.getOutput() - .contains("1(" + i + ")")); + assertThat(logCapturer.getOutput()) + .contains("1(" + i + ")"); } break; } @@ -367,8 +372,8 @@ public void testContainerStatisticsAfterDelete() throws Exception { final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); }); OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); @@ -389,7 +394,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { containerMap.values().forEach(container -> { KeyValueContainerData containerData = (KeyValueContainerData)container.getContainerData(); - Assertions.assertEquals(0, containerData.getNumPendingDeletionBlocks()); + assertEquals(0, containerData.getNumPendingDeletionBlocks()); }); }); @@ -398,7 +403,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { ((EventQueue)scm.getEventQueue()).processAll(1000); 
containerInfos = scm.getContainerManager().getContainers(); containerInfos.stream().forEach(container -> - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETING, + assertEquals(HddsProtos.LifeCycleState.DELETING, container.getState())); LogCapturer logCapturer = LogCapturer.captureLogs( legacyEnabled ? LegacyReplicationManager.LOG : ReplicationManager.LOG); @@ -422,14 +427,14 @@ public void testContainerStatisticsAfterDelete() throws Exception { List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -477,8 +482,8 @@ public void testContainerStateAfterDNRestart() throws Exception { final int keyCount = 1; List containerIdList = new ArrayList<>(); containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); }); @@ -499,14 +504,14 @@ public void testContainerStateAfterDNRestart() throws Exception { ContainerID containerId = ContainerID.valueOf( containerInfos.get(0).getContainerID()); // Before restart container state is non-empty - Assertions.assertFalse(getContainerFromDN( + assertFalse(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); // Restart DataNode cluster.restartHddsDatanode(0, true); // After restart also container state remains non-empty. 
- Assertions.assertFalse(getContainerFromDN( + assertFalse(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); @@ -526,14 +531,14 @@ public void testContainerStateAfterDNRestart() throws Exception { 100, 10 * 1000); // Container state should be empty now as key got deleted - Assertions.assertTrue(getContainerFromDN( + assertTrue(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); // Restart DataNode cluster.restartHddsDatanode(0, true); // Container state should be empty even after restart - Assertions.assertTrue(getContainerFromDN( + assertTrue(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); @@ -543,14 +548,14 @@ public void testContainerStateAfterDNRestart() throws Exception { List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -607,8 +612,8 @@ public void testContainerDeleteWithInvalidKeyCount() final int keyCount = 1; List containerIdList = new ArrayList<>(); containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); }); @@ -635,7 +640,7 @@ public void testContainerDeleteWithInvalidKeyCount() = scm.getContainerManager().getContainerReplicas(containerId); // Ensure for all replica isEmpty are false in SCM - Assertions.assertTrue(scm.getContainerManager().getContainerReplicas( + assertTrue(scm.getContainerManager().getContainerReplicas( containerId).stream(). 
allMatch(replica -> !replica.isEmpty())); @@ -680,14 +685,14 @@ public void testContainerDeleteWithInvalidKeyCount() List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -702,7 +707,7 @@ public void testContainerDeleteWithInvalidKeyCount() private void verifyTransactionsCommitted() throws IOException { scm.getScmBlockManager().getDeletedBlockLog(); for (long txnID = 1; txnID <= maxTransactionId; txnID++) { - Assertions.assertNull( + assertNull( scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID)); } } @@ -716,15 +721,15 @@ private void matchContainerTransactionIds() throws IOException { for (ContainerData containerData : containerDataList) { long containerId = containerData.getContainerID(); if (containerIdsWithDeletedBlocks.contains(containerId)) { - Assertions.assertTrue( - scm.getContainerInfo(containerId).getDeleteTransactionId() > 0); + assertThat(scm.getContainerInfo(containerId).getDeleteTransactionId()) + .isGreaterThan(0); maxTransactionId = max(maxTransactionId, scm.getContainerInfo(containerId).getDeleteTransactionId()); } else { - Assertions.assertEquals( + assertEquals( scm.getContainerInfo(containerId).getDeleteTransactionId(), 0); } - Assertions.assertEquals( + assertEquals( ((KeyValueContainerData) dnContainerSet.getContainer(containerId) .getContainerData()).getDeleteTransactionId(), scm.getContainerInfo(containerId).getDeleteTransactionId()); @@ -741,7 +746,7 @@ private void verifyBlocksCreated( KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet .getContainer(blockID.getContainerID()).getContainerData(); try (DBHandle db = BlockUtils.getDB(cData, conf)) { - Assertions.assertNotNull(db.getStore().getBlockDataTable() + assertNotNull(db.getStore().getBlockDataTable() .get(cData.getBlockKey(blockID.getLocalID()))); } }, omKeyLocationInfoGroups); @@ -763,11 +768,11 @@ private void verifyBlocksDeleted( String blockKey = cData.getBlockKey(blockID.getLocalID()); BlockData blockData = blockDataTable.get(blockKey); - Assertions.assertNull(blockData); + assertNull(blockData); String deletingKey = cData.getDeletingBlockKey( blockID.getLocalID()); - Assertions.assertNull(blockDataTable.get(deletingKey)); + assertNull(blockDataTable.get(deletingKey)); } containerIdsWithDeletedBlocks.add(blockID.getContainerID()); }, omKeyLocationInfoGroups); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index c62f943ee87..cd25ee25c8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -58,6 +58,7 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -159,8 +160,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails), 500, 5 * 1000); // Make sure the closeContainerCommandHandler is Invoked - assertTrue( - closeContainerHandler.getInvocationCount() > lastInvocationCount); + assertThat(closeContainerHandler.getInvocationCount()).isGreaterThan(lastInvocationCount); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index ec47c76d94d..7cb3c7797fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -45,9 +45,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -117,7 +118,7 @@ public void test() throws Exception { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); - Assertions.assertFalse(isContainerClosed(cluster, containerId.getId())); + assertFalse(isContainerClosed(cluster, containerId.getId())); DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0).getDatanodeDetails(); @@ -135,7 +136,7 @@ public void test() throws Exception { 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(cluster, containerId.getId())); + assertTrue(isContainerClosed(cluster, containerId.getId())); } private static Boolean isContainerClosed(MiniOzoneCluster cluster, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 332683658b1..00654d943f7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java 
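// The surrounding hunks also apply a purely mechanical cleanup: the
// org.junit.jupiter.api.Assertions import is dropped in favour of static
// imports of the individual assertion methods, so call sites lose the
// "Assertions." prefix.  A short, hypothetical example of the resulting style
// (the booleans stand in for the isContainerClosed/isContainerDeleted helpers
// used in these tests):
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class StaticImportStyleSketch {
  @Test
  void usesStaticallyImportedAssertions() {
    boolean containerClosed = true;    // hypothetical test state
    boolean containerDeleted = false;  // hypothetical test state

    assertTrue(containerClosed);       // was: Assertions.assertTrue(...)
    assertFalse(containerDeleted);     // was: Assertions.assertFalse(...)
  }
}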
@@ -57,7 +57,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -77,6 +76,10 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests DeleteContainerCommand Handler. @@ -165,8 +168,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -189,8 +191,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -217,8 +218,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() lingeringBlock.createNewFile(); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Set container blockCount to 0 to mock that it is empty as per RocksDB getContainerfromDN(hddsDatanodeService, containerId.getId()) @@ -243,10 +243,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() contains("Files still part of the container on delete"), 500, 5 * 2000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, - containerId.getId())); - Assertions.assertTrue(beforeDeleteFailedCount < - metrics.getContainerDeleteFailedNonEmpty()); + assertTrue(!isContainerDeleted(hddsDatanodeService, containerId.getId())); + assertThat(beforeDeleteFailedCount).isLessThan(metrics.getContainerDeleteFailedNonEmpty()); // Send the delete command. It should pass with force flag. 
// Deleting a non-empty container should pass on the DN when the force flag // is true @@ -260,10 +258,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - Assertions.assertTrue(beforeForceCount < - metrics.getContainerForceDelete()); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); + assertThat(beforeForceCount).isLessThan(metrics.getContainerForceDelete()); kv.setCheckChunksFilePath(false); } @@ -297,8 +293,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -313,8 +308,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -341,8 +335,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() lingeringBlock.createNewFile(); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // send delete container to the datanode SCMCommand command = new DeleteContainerCommand(containerId.getId(), @@ -357,8 +350,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @Test @@ -384,8 +376,7 @@ public void testDeleteNonEmptyContainerBlockTable() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -418,11 +409,11 @@ public void testDeleteNonEmptyContainerBlockTable() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); long containerDeleteFailedNonEmptyBlockDB = @@ -446,10 +437,10 @@ public void testDeleteNonEmptyContainerBlockTable() contains("the container is not empty with blockCount"), 500, 5 * 2000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, + assertTrue(!isContainerDeleted(hddsDatanodeService, containerId.getId())); - 
Assertions.assertTrue(containerDeleteFailedNonEmptyBlockDB < - metrics.getContainerDeleteFailedNonEmpty()); + assertThat(containerDeleteFailedNonEmptyBlockDB) + .isLessThan(metrics.getContainerDeleteFailedNonEmpty()); // Now empty the container Dir and try with a non-empty block table Container containerToDelete = getContainerfromDN( @@ -470,8 +461,7 @@ public void testDeleteNonEmptyContainerBlockTable() cluster.getStorageContainerManager().getScmContext().getTermOfLeader()); nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command); Thread.sleep(5000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Send the delete command. It should pass with force flag. long beforeForceCount = metrics.getContainerForceDelete(); command = new DeleteContainerCommand(containerId.getId(), true); @@ -483,10 +473,9 @@ public void testDeleteNonEmptyContainerBlockTable() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); - Assertions.assertTrue(beforeForceCount < - metrics.getContainerForceDelete()); + assertThat(beforeForceCount).isLessThan(metrics.getContainerForceDelete()); } @Test @@ -507,8 +496,7 @@ public void testContainerDeleteWithInvalidBlockCount() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); NodeManager nodeManager = @@ -525,12 +513,10 @@ public void testContainerDeleteWithInvalidBlockCount() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Clear block table clearBlocksTable(getContainerfromDN(hddsDatanodeService, @@ -561,8 +547,7 @@ public void testContainerDeleteWithInvalidBlockCount() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @@ -612,8 +597,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -630,12 +614,10 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, 
containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // send delete container to the datanode SCMCommand command = new DeleteContainerCommand(containerId.getId(), @@ -656,8 +638,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() ContainerMetrics metrics = hddsDatanodeService .getDatanodeStateMachine().getContainer().getMetrics(); - Assertions.assertEquals(1, - metrics.getContainerDeleteFailedNonEmpty()); + assertEquals(1, metrics.getContainerDeleteFailedNonEmpty()); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -678,7 +659,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @@ -723,7 +704,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer() } } - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); @@ -738,7 +719,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer() isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java index ef65977017f..e60b1581b32 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -47,7 +48,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.ozone.test.GenericTestUtils; -import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; @@ -273,7 +273,7 @@ private void testRejectPutAndWriteChunkAfterFinalizeBlock(ContainerID containerI } } - @NotNull + @Nonnull private ContainerProtos.ContainerCommandRequestProto getFinalizeBlockRequest( List omKeyLocationInfoGroupList, ContainerInfo container) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java index c47f0993099..23382b2abe6 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -43,6 +42,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test the behaviour of the datanode and scm when communicating @@ -101,7 +101,7 @@ public void test() throws Exception { //a new key is created, but the datanode default REFRESH_PERIOD is 1 hour, //still the cache is updated, so the scm will eventually get the new //used space from the datanode through node report. - Assertions.assertTrue(cluster.getStorageContainerManager() + assertTrue(cluster.getStorageContainerManager() .getScmNodeManager().getUsageInfo(datanodeDetails) .getScmNodeStat().getScmUsed().isEqual(currentScmUsed)); @@ -116,7 +116,7 @@ public void test() throws Exception { //after waiting for several node report , this usage info //in SCM should be updated as we have updated the DN's cached usage info. - Assertions.assertTrue(cluster.getStorageContainerManager() + assertTrue(cluster.getStorageContainerManager() .getScmNodeManager().getUsageInfo(datanodeDetails) .getScmNodeStat().getScmUsed().isGreater(currentScmUsed)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index cd6dfb171c0..8c35d5011a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -53,8 +53,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.util.ExitUtils; @@ -66,7 +66,6 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Assertions; /** * This class tests the metrics of ContainerStateMachine. 
@@ -142,7 +141,7 @@ static void runContainerStateMachineMetrics( pipeline, blockID, 1024); ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); metric = getMetrics(CSMMetrics.SOURCE_NAME + @@ -160,7 +159,7 @@ static void runContainerStateMachineMetrics( ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest .getWriteChunk()); response = client.sendCommand(readChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); metric = getMetrics(CSMMetrics.SOURCE_NAME + @@ -169,10 +168,10 @@ static void runContainerStateMachineMetrics( assertCounter("NumApplyTransactionOps", 1L, metric); applyTransactionLatency = getDoubleGauge( "ApplyTransactionNsAvgTime", metric); - assertTrue(applyTransactionLatency > 0.0); + assertThat(applyTransactionLatency).isGreaterThan(0.0); writeStateMachineLatency = getDoubleGauge( "WriteStateMachineDataNsAvgTime", metric); - assertTrue(writeStateMachineLatency > 0.0); + assertThat(writeStateMachineLatency).isGreaterThan(0.0); } finally { if (client != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 0b83c650fe0..d4900bb4878 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -55,7 +55,8 @@ import static org.apache.ozone.test.MetricsAsserts.assertCounter; import static org.apache.ozone.test.MetricsAsserts.assertQuantileGauges; import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import org.junit.jupiter.api.Assertions; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -121,7 +122,7 @@ public void testContainerMetrics() throws Exception { pipeline, blockID, 1024); ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); //Read Chunk @@ -129,7 +130,7 @@ public void testContainerMetrics() throws Exception { ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest .getWriteChunk()); response = client.sendCommand(readChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); MetricsRecordBuilder containerMetrics = getMetrics( "StorageContainerMetrics"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index 06e1f933749..a1d436b3360 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -17,14 +17,12 @@ package org.apache.hadoop.ozone.container.metrics; -import 
org.apache.commons.text.WordUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -32,12 +30,13 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.UUID; +import static org.apache.commons.text.WordUtils.capitalize; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.STATE_CONTEXT_COMMAND_QUEUE_PREFIX; import static org.apache.ozone.test.MetricsAsserts.getLongGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import static org.assertj.core.api.Assertions.assertThat; /** * Test for queue metrics of datanodes. @@ -47,8 +46,6 @@ public class TestDatanodeQueueMetrics { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private static int numOfOMs = 3; private String scmServiceId; @@ -68,13 +65,9 @@ public class TestDatanodeQueueMetrics { public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) @@ -89,14 +82,12 @@ public void init() throws Exception { @Test public void testQueueMetrics() { - for (SCMCommandProto.Type type: SCMCommandProto.Type.values()) { - Assertions.assertTrue( - getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + - WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0); - Assertions.assertTrue( - getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + - WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0); + String typeSize = capitalize(String.valueOf(type)) + "Size"; + assertThat(getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + typeSize)) + .isGreaterThanOrEqualTo(0); + assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize)) + .isGreaterThanOrEqualTo(0); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 3a94f3410df..7a64ddc5d5e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -48,7 +48,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; 
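// The TestOzoneContainer hunk just below removes try { ... } catch (Exception
// e) { fail(); } wrappers around calls that are not expected to throw.  In
// JUnit 5 an unexpected exception already fails the test and keeps the full
// stack trace, so the wrapper only loses information; assertThrows covers the
// opposite case, where an exception is the expected outcome.  A minimal sketch
// of both cases; the Service class is hypothetical, not Ozone code:
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class ExceptionHandlingSketch {

  static final class Service {
    private boolean started;

    void start() {
      if (started) {
        throw new IllegalStateException("already started");
      }
      started = true;
    }
  }

  @Test
  void unexpectedExceptionsJustPropagate() {
    // No try/catch + fail(): if start() throws, the test fails with the cause.
    new Service().start();
  }

  @Test
  void expectedExceptionsUseAssertThrows() {
    Service service = new Service();
    service.start();
    assertThrows(IllegalStateException.class, service::start);
  }
}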
import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -91,7 +90,7 @@ public void testCreateOzoneContainer( } @Test - public void testOzoneContainerStart( + void testOzoneContainerStart( @TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception { OzoneConfiguration conf = newOzoneConfiguration(); OzoneContainer container = null; @@ -110,18 +109,12 @@ public void testOzoneContainerStart( String clusterId = UUID.randomUUID().toString(); container.start(clusterId); - try { - container.start(clusterId); - } catch (Exception e) { - fail(); - } + + container.start(clusterId); + + container.stop(); container.stop(); - try { - container.stop(); - } catch (Exception e) { - fail(); - } } finally { if (container != null) { @@ -199,7 +192,7 @@ public static void runTestOzoneContainerViaDataNode( response = client.sendCommand(request); int chunksCount = putBlockRequest.getPutBlock().getBlockData(). getChunksCount(); - ContainerTestHelper.verifyGetBlock(request, response, chunksCount); + ContainerTestHelper.verifyGetBlock(response, chunksCount); // Delete Block and Delete Chunk are handled by BlockDeletingService // ContainerCommandRequestProto DeleteBlock and DeleteChunk requests @@ -367,7 +360,7 @@ public void testCloseContainer( response = client.sendCommand(request); int chunksCount = putBlockRequest.getPutBlock().getBlockData() .getChunksCount(); - ContainerTestHelper.verifyGetBlock(request, response, chunksCount); + ContainerTestHelper.verifyGetBlock(response, chunksCount); } finally { if (client != null) { client.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 841f344fc34..73910ef00ff 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -42,7 +42,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils.LogCapturer; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -81,12 +80,13 @@ import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.apache.ozone.test.GenericTestUtils.LogCapturer.captureLogs; import static org.apache.ozone.test.GenericTestUtils.waitFor; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.slf4j.LoggerFactory.getLogger; @@ -257,7 +257,7 @@ public void testLongLivingClientWithCertRenews() throws Exception { while (e.getCause() != null) { e = e.getCause(); } - assertTrue((e instanceof CertificateExpiredException)); + assertInstanceOf(CertificateExpiredException.class, e); } 
finally { clientManager.releaseClient(client, true); } @@ -288,10 +288,12 @@ private void assertClientTrustManagerLoading( } private void assertClientTrustManagerFailedAndRetried(LogCapturer logs) { - assertTrue(logs.getOutput().contains("trying to re-fetch rootCA"), - "Check client failed first, and initiates a reload."); - assertTrue(logs.getOutput().contains("Loading certificates for client."), - "Check client loaded certificates."); + assertThat(logs.getOutput()) + .withFailMessage("Check client failed first, and initiates a reload.") + .contains("trying to re-fetch rootCA"); + assertThat(logs.getOutput()) + .withFailMessage("Check client loaded certificates.") + .contains("Loading certificates for client."); logs.clearOutput(); } @@ -320,8 +322,8 @@ private void assertDownloadContainerFails(long containerId, sourceDatanodes, tempFolder.resolve("tmp"), NO_COMPRESSION); downloader.close(); assertNull(file); - assertTrue(logCapture.getOutput().contains( - "java.security.cert.CertificateExpiredException")); + assertThat(logCapture.getOutput()) + .contains("java.security.cert.CertificateExpiredException"); } private void assertDownloadContainerWorks(List containers, @@ -352,20 +354,15 @@ private Token createContainer( } private long createAndCloseContainer( - XceiverClientSpi client, boolean useToken) { + XceiverClientSpi client, boolean useToken) throws IOException { long id = getTestContainerID(); - try { - Token - token = createContainer(client, useToken, id); - - ContainerCommandRequestProto request = - getCloseContainer(client.getPipeline(), id, token); - ContainerCommandResponseProto response = client.sendCommand(request); - assertNotNull(response); - assertSame(response.getResult(), ContainerProtos.Result.SUCCESS); - } catch (Exception e) { - Assertions.fail(e); - } + Token token = createContainer(client, useToken, id); + + ContainerCommandRequestProto request = + getCloseContainer(client.getPipeline(), id, token); + ContainerCommandResponseProto response = client.sendCommand(request); + assertNotNull(response); + assertSame(response.getResult(), ContainerProtos.Result.SUCCESS); return id; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java index d3d9ad55c11..08932aa4e37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; @@ -88,13 +89,8 @@ static void setup() throws Exception { } @AfterAll - static void tearDown() throws IOException { - if (clientFactory != null) { - clientFactory.close(); - } - if (cluster != null) { - cluster.shutdown(); - } + static void tearDown() { + IOUtils.closeQuietly(clientFactory, cluster); } @ParameterizedTest diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 0451ba5c98e..3c89bb12ee7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -67,7 +67,6 @@ import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -75,6 +74,7 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test Containers. @@ -89,6 +89,7 @@ public class TestContainerServer { public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); + CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); @@ -170,7 +171,7 @@ static void runTestClientServer( ContainerTestHelper .getCreateContainerRequest( ContainerTestHelper.getTestContainerID(), pipeline); - Assertions.assertNotNull(request.getTraceID()); + assertNotNull(request.getTraceID()); client.sendCommand(request); } finally { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 2880d90db2f..53420c0e220 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -95,23 +95,21 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.ratis.rpc.RpcType; - -import static org.apache.ratis.rpc.SupportedRpcType.GRPC; - import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; + +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import static org.apache.ratis.rpc.SupportedRpcType.GRPC; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test Container servers when security is enabled. 
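// The TestOzoneContainerWithTLS hunks above and the TestSecureContainerServer
// hunk below apply two further replacements of assertTrue: JUnit 5's
// assertInstanceOf(T.class, x) instead of assertTrue(x instanceof T), and
// AssertJ's assertThat(text).contains(...) (with withFailMessage keeping the
// old descriptive message) instead of assertTrue(text.contains(...), msg).
// A minimal, hypothetical sketch of both, assuming JUnit 5 and AssertJ:
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;

import java.security.cert.CertificateExpiredException;
import org.junit.jupiter.api.Test;

class TypeAndTextAssertionSketch {
  @Test
  void checksCauseTypeAndLogText() {
    Throwable cause = new CertificateExpiredException();
    String log = "client: trying to re-fetch rootCA";

    // was: assertTrue(cause instanceof CertificateExpiredException)
    assertInstanceOf(CertificateExpiredException.class, cause);

    // was: assertTrue(log.contains("trying to re-fetch rootCA"), "...")
    assertThat(log)
        .withFailMessage("Check client failed first, and initiates a reload.")
        .contains("trying to re-fetch rootCA");
  }
}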
@@ -318,9 +316,9 @@ private static void assertFailsTokenVerification(XceiverClientSpi client, ContainerCommandResponseProto response = client.sendCommand(request); assertNotEquals(response.getResult(), ContainerProtos.Result.SUCCESS); String msg = response.getMessage(); - assertTrue(msg.contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()), msg); + assertThat(msg).contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()); } else { - final Throwable t = Assertions.assertThrows(Throwable.class, + final Throwable t = assertThrows(Throwable.class, () -> client.sendCommand(request)); assertRootCauseMessage(BLOCK_TOKEN_VERIFICATION_FAILED.name(), t); } @@ -331,7 +329,7 @@ private static void assertRootCauseMessage(String contained, Throwable t) { Throwable rootCause = ExceptionUtils.getRootCause(t); assertNotNull(rootCause); String msg = rootCause.getMessage(); - assertTrue(msg.contains(contained), msg); + assertThat(msg).contains(contained); } private static String getToken(ContainerID containerID) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index 0d65d81c5ed..ec7eb81db33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -36,9 +36,8 @@ import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Named; import org.junit.jupiter.api.Test; @@ -62,6 +61,8 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class tests `ozone debug ldb` CLI that reads from a RocksDB directory. @@ -197,8 +198,8 @@ private static Stream scanTestCases() { @ParameterizedTest @MethodSource("scanTestCases") void testLDBScan( - @NotNull Pair tableAndOption, - @NotNull Pair expectedExitCodeStderrPair, + @Nonnull Pair tableAndOption, + @Nonnull Pair expectedExitCodeStderrPair, List scanArgs, Pair dbMapRange) throws IOException { @@ -218,7 +219,7 @@ void testLDBScan( int exitCode = cmd.execute(completeScanArgs.toArray(new String[0])); // Check exit code. Print stderr if not expected int expectedExitCode = expectedExitCodeStderrPair.getLeft(); - Assertions.assertEquals(expectedExitCode, exitCode, stderr.toString()); + assertEquals(expectedExitCode, exitCode, stderr.toString()); // Construct expected result map given test param input Map> expectedMap; @@ -235,7 +236,7 @@ void testLDBScan( // Check stderr final String stderrShouldContain = expectedExitCodeStderrPair.getRight(); - Assertions.assertTrue(stderr.toString().contains(stderrShouldContain)); + assertThat(stderr.toString()).contains(stderrShouldContain); } @Test @@ -251,13 +252,13 @@ void testScanOfPipelinesWhenNoData() throws IOException { int exitCode = cmd.execute(completeScanArgs.toArray(new String[0])); // Check exit code. 
Print stderr if not expected - Assertions.assertEquals(0, exitCode, stderr.toString()); + assertEquals(0, exitCode, stderr.toString()); // Check stdout - Assertions.assertEquals("{ }\n", stdout.toString()); + assertEquals("{ }\n", stdout.toString()); // Check stderr - Assertions.assertEquals("", stderr.toString()); + assertEquals("", stderr.toString()); } /** @@ -271,7 +272,7 @@ private void assertContents(Map expected, String actualStr) Map> actualMap = MAPPER.readValue( actualStr, new TypeReference>>() { }); - Assertions.assertEquals(expected, actualMap); + assertEquals(expected, actualMap); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java index 784c7df8937..c24cf748ddb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; -import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -76,12 +75,8 @@ public static void init() throws Exception { OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); clientConfig.setStreamBufferFlushDelay(false); conf.setFromObject(clientConfig); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); // Set the number of keys to be processed during batch operate. - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index eae12fd4dc9..cca47e17e40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -30,12 +30,12 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; /** * Test Datanode Ratis log parser. 
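// The TestDnRatisLogParser hunk that follows swaps assertTrue(file.isFile())
// and assertTrue(output.contains(...)) for AssertJ's dedicated File and String
// assertions, which name the path or the searched substring when they fail.
// A minimal sketch, assuming AssertJ on the classpath; the temporary file and
// the output string are illustrative only:
import static org.assertj.core.api.Assertions.assertThat;

import java.io.File;
import java.nio.file.Files;
import org.junit.jupiter.api.Test;

class FileAssertionSketch {
  @Test
  void verifiesFileAndCapturedOutput() throws Exception {
    File logFile = Files.createTempFile("log_inprogress_", ".tmp").toFile();
    logFile.deleteOnExit();
    String output = "Num Total Entries: 42";

    // was: assertTrue(logFile.isFile())
    assertThat(logFile).isFile();

    // was: assertTrue(output.contains("Num Total Entries:"))
    assertThat(output).contains("Num Total Entries:");
  }
}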
@@ -78,14 +78,14 @@ public void testRatisLogParsing() throws Exception { File currentDir = new File(pipelineDir, "current"); File logFile = new File(currentDir, "log_inprogress_0"); GenericTestUtils.waitFor(logFile::exists, 100, 15000); - Assertions.assertTrue(logFile.isFile()); + assertThat(logFile).isFile(); DatanodeRatisLogParser datanodeRatisLogParser = new DatanodeRatisLogParser(); datanodeRatisLogParser.setSegmentFile(logFile); datanodeRatisLogParser.parseRatisLogs( DatanodeRatisLogParser::smToContainerLogString); - Assertions.assertTrue(out.toString(StandardCharsets.UTF_8.name()) - .contains("Num Total Entries:")); + assertThat(out.toString(StandardCharsets.UTF_8.name())) + .contains("Num Total Entries:"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java index 719c38816f4..0273deb50e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -56,6 +55,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.assertj.core.api.Assertions.assertThat; /** * This class tests datanode can tolerate configured num of failed volumes. @@ -141,8 +141,8 @@ public void testDNCorrectlyHandlesVolumeFailureOnStartup() throws Exception { // cluster. 
GenericTestUtils.waitFor(() -> exitCapturer.getOutput() .contains("Exiting with status 1: ExitException"), 500, 60000); - Assertions.assertTrue(dsmCapturer.getOutput() - .contains("DatanodeStateMachine Shutdown due to too many bad volumes")); + assertThat(dsmCapturer.getOutput()) + .contains("DatanodeStateMachine Shutdown due to too many bad volumes"); // restore bad volumes DatanodeTestUtils.restoreBadRootDir(volRootDir0); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 17d82fcc8e6..28cc863c26d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -23,12 +23,15 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import picocli.CommandLine; import java.time.Duration; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Tests Freon, with MiniOzoneCluster and validate data. */ @@ -76,10 +79,10 @@ public void ratisTestLargeKey() { "--validate-writes" ); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); + assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); } @Test @@ -95,14 +98,12 @@ public void validateWriteTest() { "--validate-writes" ); - Assertions.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); - Assertions.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); - Assertions.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertTrue(randomKeyGenerator.getValidateWrites()); - Assertions.assertNotEquals(0, randomKeyGenerator.getTotalKeysValidated()); - Assertions.assertNotEquals(0, randomKeyGenerator - .getSuccessfulValidationCount()); - Assertions.assertEquals(0, randomKeyGenerator - .getUnsuccessfulValidationCount()); + assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded()); + assertTrue(randomKeyGenerator.getValidateWrites()); + assertNotEquals(0, randomKeyGenerator.getTotalKeysValidated()); + assertNotEquals(0, randomKeyGenerator.getSuccessfulValidationCount()); + assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java index 12b00a0c8cc..0798731a839 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java @@ -28,14 +28,15 @@ import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import picocli.CommandLine; - import java.util.concurrent.TimeUnit; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Tests Freon with Datanode restarts without waiting for pipeline to close. */ @@ -86,15 +87,14 @@ public void testRestart() throws Exception { String expectedSnapFile = storage.getSnapshotFile(termIndexBeforeRestart.getTerm(), termIndexBeforeRestart.getIndex()).getAbsolutePath(); - Assertions.assertEquals(expectedSnapFile, - snapshotInfo.getFile().getPath().toString()); - Assertions.assertEquals(termInSnapshot, termIndexBeforeRestart); + assertEquals(expectedSnapFile, snapshotInfo.getFile().getPath().toString()); + assertEquals(termInSnapshot, termIndexBeforeRestart); // After restart the term index might have progressed to apply pending // transactions. TermIndex termIndexAfterRestart = sm.getLastAppliedTermIndex(); - Assertions.assertTrue(termIndexAfterRestart.getIndex() >= - termIndexBeforeRestart.getIndex()); + assertThat(termIndexAfterRestart.getIndex()) + .isGreaterThanOrEqualTo(termIndexBeforeRestart.getIndex()); // TODO: fix me // Give some time for the datanode to register again with SCM. // If we try to use the pipeline before the datanode registers with SCM @@ -119,10 +119,10 @@ private void startFreon() { "--validate-writes" ); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); + assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); } private StateMachine getStateMachine() throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index c0b055a8b20..d78beff7e78 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -31,14 +31,14 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import picocli.CommandLine; - import java.time.Duration; import java.util.concurrent.TimeUnit; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Tests Freon with Pipeline destroy. 
*/ @@ -114,11 +114,10 @@ private void startFreon() { "--validate-writes" ); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); - Assertions.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertEquals(0, - randomKeyGenerator.getUnsuccessfulValidationCount()); + assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated()); + assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount()); } private void destroyPipeline() throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java index afda0368f02..3140681d3dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -47,6 +46,9 @@ import java.net.URI; import java.util.ArrayList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test for HadoopDirTreeGenerator. */ @@ -157,7 +159,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth, int actualDepth = traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span, fileCount, StorageSize.parse(perFileSize, StorageUnit.BYTES)); - Assertions.assertEquals(depth, actualDepth, "Mismatch depth in a path"); + assertEquals(depth, actualDepth, "Mismatch depth in a path"); } } @@ -179,16 +181,17 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth, return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth, expectedSpanCnt, expectedFileCnt, perFileSize); } else { - Assertions.assertEquals(perFileSize.toBytes(), fileStatus.getLen(), "Mismatches file len"); + assertEquals(perFileSize.toBytes(), fileStatus.getLen(), "Mismatches file len"); String fName = fileStatus.getPath().getName(); - Assertions.assertFalse(files.contains(fName), actualNumFiles + "actualNumFiles:" + fName + - ", fName:" + expectedFileCnt + ", expectedFileCnt:" + depth - + ", depth:"); + assertThat(files) + .withFailMessage(actualNumFiles + "actualNumFiles:" + fName + + ", fName:" + expectedFileCnt + ", expectedFileCnt:" + depth + ", depth:") + .doesNotContain(fName); files.add(fName); actualNumFiles++; } } - Assertions.assertEquals(expectedFileCnt, actualNumFiles, "Mismatches files count in a directory"); + assertEquals(expectedFileCnt, actualNumFiles, "Mismatches files count in a directory"); return depth; } @@ -200,7 +203,7 @@ private int verifyActualSpan(int expectedSpanCnt, ++actualSpan; } } - Assertions.assertEquals(expectedSpanCnt, actualSpan, "Mismatches subdirs count in a directory"); + assertEquals(expectedSpanCnt, actualSpan, "Mismatches subdirs count in a directory"); return actualSpan; } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java index 7c75858269c..4411c0d2ea5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -32,7 +32,6 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import java.util.LinkedList; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -44,6 +43,7 @@ import java.net.URI; import static org.apache.ozone.test.GenericTestUtils.getTempPath; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test for HadoopNestedDirGenerator. @@ -142,7 +142,7 @@ private void verifyDirTree(String volumeName, String bucketName, // verify the num of peer directories and span directories p = depthBFS(fileSystem, fileStatuses, span, actualDepth); int actualSpan = spanCheck(fileSystem, span, p); - Assertions.assertEquals(span, actualSpan, "Mismatch span in a path"); + assertEquals(span, actualSpan, "Mismatch span in a path"); } } @@ -182,7 +182,7 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, p = f.getPath().getParent(); } } - Assertions.assertEquals(depth, actualDepth, "Mismatch depth in a path"); + assertEquals(depth, actualDepth, "Mismatch depth in a path"); return p; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 88fb0107969..bca21aebd1a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -46,7 +46,6 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -70,6 +69,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DIR; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + /** * Tests Freon, with MiniOzoneCluster. @@ -178,9 +181,8 @@ public void testDAGReconstruction() "--validate-writes" ); - Assertions.assertEquals(500L, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertEquals(500L, - randomKeyGenerator.getSuccessfulValidationCount()); + assertEquals(500L, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(500L, randomKeyGenerator.getSuccessfulValidationCount()); List volList = cluster.getOzoneManager() .listAllVolumes("", "", 2); @@ -263,7 +265,7 @@ public void testDAGReconstruction() // Same snapshot. 
Result should be empty list List sstDiffList22 = differ.getSSTDiffList(snap2, snap2); - Assertions.assertTrue(sstDiffList22.isEmpty()); + assertThat(sstDiffList22).isEmpty(); snapDB1.close(); snapDB2.close(); snapDB3.close(); @@ -292,13 +294,13 @@ public void testDAGReconstruction() ((RDBStore)((OmSnapshot)snapDB3.get()) .getMetadataManager().getStore()).getDb().getManagedRocksDb()); List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); - Assertions.assertEquals(sstDiffList21, sstDiffList21Run2); + assertEquals(sstDiffList21, sstDiffList21Run2); List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2); - Assertions.assertEquals(sstDiffList32, sstDiffList32Run2); + assertEquals(sstDiffList32, sstDiffList32Run2); List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1); - Assertions.assertEquals(sstDiffList31, sstDiffList31Run2); + assertEquals(sstDiffList31, sstDiffList31Run2); snapDB1.close(); snapDB2.close(); snapDB3.close(); @@ -324,9 +326,8 @@ public void testSkipTrackingWithZeroSnapshot() { "--validate-writes" ); - Assertions.assertEquals(1000L, randomKeyGenerator.getNumberOfKeysAdded()); - Assertions.assertEquals(1000L, - randomKeyGenerator.getSuccessfulValidationCount()); + assertEquals(1000L, randomKeyGenerator.getNumberOfKeysAdded()); + assertEquals(1000L, randomKeyGenerator.getSuccessfulValidationCount()); String omMetadataDir = cluster.getOzoneManager().getConfiguration().get(OZONE_METADATA_DIRS); @@ -338,7 +339,7 @@ public void testSkipTrackingWithZeroSnapshot() { if (fileList != null) { for (File file : fileList) { if (file != null && file.isFile() && file.getName().endsWith(".log")) { - Assertions.assertEquals(0L, file.length()); + assertEquals(0L, file.length()); } } } @@ -346,8 +347,8 @@ public void testSkipTrackingWithZeroSnapshot() { Path sstBackupPath = Paths.get(omMetadataDir, OM_SNAPSHOT_DIFF_DIR, DB_COMPACTION_SST_BACKUP_DIR); fileList = sstBackupPath.toFile().listFiles(); - Assertions.assertNotNull(fileList); - Assertions.assertEquals(0L, fileList.length); + assertNotNull(fileList); + assertEquals(0L, fileList.length); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java index 04547064735..5244bb85790 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -46,6 +45,9 @@ import java.io.IOException; import java.net.URI; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test for OmBucketReadWriteFileOps. 
*/ @@ -207,7 +209,7 @@ private void verifyFileCreation(int expectedCount, FileStatus[] fileStatuses, } } } - Assertions.assertEquals(expectedCount, actual, "Mismatch Count!"); + assertEquals(expectedCount, actual, "Mismatch Count!"); } private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { @@ -218,7 +220,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockWaitingTimeMs()); int readWaitingSamples = Integer.parseInt(readLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readWaitingSamples > 0, "Read Lock Waiting Samples should be positive"); + assertThat(readWaitingSamples).isPositive(); String readLockHeldTimeMsStat = omLockMetrics.getReadLockHeldTimeMsStat(); LOG.info("Read Lock Held Time Stat: " + readLockHeldTimeMsStat); @@ -226,7 +228,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockHeldTimeMs()); int readHeldSamples = Integer.parseInt(readLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readHeldSamples > 0, "Read Lock Held Samples should be positive"); + assertThat(readHeldSamples).isPositive(); String writeLockWaitingTimeMsStat = omLockMetrics.getWriteLockWaitingTimeMsStat(); @@ -235,7 +237,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockWaitingTimeMs()); int writeWaitingSamples = Integer.parseInt(writeLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeWaitingSamples > 0, "Write Lock Waiting Samples should be positive"); + assertThat(writeWaitingSamples).isPositive(); String writeLockHeldTimeMsStat = omLockMetrics.getWriteLockHeldTimeMsStat(); LOG.info("Write Lock Held Time Stat: " + writeLockHeldTimeMsStat); @@ -243,7 +245,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockHeldTimeMs()); int writeHeldSamples = Integer.parseInt(writeLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeHeldSamples > 0, "Write Lock Held Samples should be positive"); + assertThat(writeHeldSamples).isPositive(); } private static class ParameterBuilder { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index ba141f7c5eb..b74022b83e5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -33,7 +33,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -45,6 +44,9 @@ import java.io.IOException; import java.util.Iterator; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test for OmBucketReadWriteKeyOps. 
*/ @@ -193,7 +195,7 @@ private void verifyKeyCreation(int expectedCount, OzoneBucket bucket, ozoneKeyIterator.next(); ++actual; } - Assertions.assertEquals(expectedCount, actual, "Mismatch Count!"); + assertEquals(expectedCount, actual, "Mismatch Count!"); } private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { @@ -204,7 +206,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockWaitingTimeMs()); int readWaitingSamples = Integer.parseInt(readLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readWaitingSamples > 0, "Read Lock Waiting Samples should be positive"); + assertThat(readWaitingSamples).isGreaterThan(0); String readLockHeldTimeMsStat = omLockMetrics.getReadLockHeldTimeMsStat(); LOG.info("Read Lock Held Time Stat: " + readLockHeldTimeMsStat); @@ -212,7 +214,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockHeldTimeMs()); int readHeldSamples = Integer.parseInt(readLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readHeldSamples > 0, "Read Lock Held Samples should be positive"); + assertThat(readHeldSamples).isGreaterThan(0); String writeLockWaitingTimeMsStat = omLockMetrics.getWriteLockWaitingTimeMsStat(); @@ -221,7 +223,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockWaitingTimeMs()); int writeWaitingSamples = Integer.parseInt(writeLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeWaitingSamples > 0, "Write Lock Waiting Samples should be positive"); + assertThat(writeWaitingSamples).isGreaterThan(0); String writeLockHeldTimeMsStat = omLockMetrics.getWriteLockHeldTimeMsStat(); LOG.info("Write Lock Held Time Stat: " + writeLockHeldTimeMsStat); @@ -229,7 +231,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockHeldTimeMs()); int writeHeldSamples = Integer.parseInt(writeLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeHeldSamples > 0, "Write Lock Held Samples should be positive"); + assertThat(writeHeldSamples).isGreaterThan(0); } private static class ParameterBuilder { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java index 77815698c3b..7811470887d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java @@ -67,7 +67,6 @@ public class TestContainerMapper { private static OzoneManager ozoneManager; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static final String SCM_ID = UUID.randomUUID().toString(); private static String volName = UUID.randomUUID().toString(); private static String bucketName = UUID.randomUUID().toString(); private static OzoneConfiguration conf; @@ -88,7 +87,6 @@ public static void init() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 6); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setScmId(SCM_ID) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index 1fa93cf4164..9d4d489586b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -65,8 +64,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Test for OM bootstrap process. @@ -77,8 +76,6 @@ public class TestAddRemoveOzoneManager { private MiniOzoneHAClusterImpl cluster = null; private ObjectStore objectStore; private OzoneConfiguration conf; - private final String clusterId = UUID.randomUUID().toString(); - private final String scmId = UUID.randomUUID().toString(); private long lastTransactionIndex; private UserGroupInformation user; @@ -99,8 +96,6 @@ private void setupCluster(int numInitialOMs) throws Exception { conf = new OzoneConfiguration(); conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setSCMServiceId(SCM_DUMMY_SERVICE_ID) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(numInitialOMs) @@ -206,7 +201,7 @@ public void testBootstrap() throws Exception { .toLong(TimeUnit.MILLISECONDS) * 3); // Verify that one of the new OMs is the leader - GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000); + cluster.waitForLeaderOM(); OzoneManager omLeader = cluster.getOMLeader(); assertThat(newOMNodeIds) @@ -249,21 +244,18 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { // Bootstrap a new node without updating the configs on existing OMs. // This should result in the bootstrap failing. - String newNodeId = "omNode-bootstrap-1"; - try { - cluster.bootstrapOzoneManager(newNodeId, false, false); - fail("Bootstrap should have failed as configs are not updated on" + - " all OMs."); - } catch (Exception e) { - assertEquals(OmUtils.getOMAddressListPrintString( - Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" + - " have incorrect information of the bootstrapping OM. Update their " + - "ozone-site.xml before proceeding.", e.getMessage()); - assertThat(omLog.getOutput()).contains("Remote OM config check " + - "failed on OM " + existingOMNodeId); - assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + - " - System Exit"); - } + final String newNodeId = "omNode-bootstrap-1"; + Exception e = + assertThrows(Exception.class, () -> cluster.bootstrapOzoneManager(newNodeId, false, false), + "Bootstrap should have failed as configs are not updated on all OMs."); + assertEquals(OmUtils.getOMAddressListPrintString( + Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" + + " have incorrect information of the bootstrapping OM. 
Update their " + + "ozone-site.xml before proceeding.", e.getMessage()); + assertThat(omLog.getOutput()).contains("Remote OM config check " + + "failed on OM " + existingOMNodeId); + assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + + " - System Exit"); /*************************************************************************** * 2. Force bootstrap without updating config on any OM -> fail @@ -276,15 +268,15 @@ public void testBootstrapWithoutConfigUpdate() throws Exception { miniOzoneClusterLog.clearOutput(); omLog.clearOutput(); - newNodeId = "omNode-bootstrap-2"; + String newNodeId1 = "omNode-bootstrap-2"; try { - cluster.bootstrapOzoneManager(newNodeId, false, true); - } catch (IOException e) { + cluster.bootstrapOzoneManager(newNodeId1, false, true); + } catch (IOException ex) { assertThat(omLog.getOutput()).contains("Couldn't add OM " + - newNodeId + " to peer list."); + newNodeId1 + " to peer list."); assertThat(miniOzoneClusterLog.getOutput()).contains( existingOMNodeId + " - System Exit: There is no OM configuration " + - "for node ID " + newNodeId + " in ozone-site.xml."); + "for node ID " + newNodeId1 + " in ozone-site.xml."); // Verify that the existing OM has stopped. assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning()); @@ -323,23 +315,20 @@ public void testForceBootstrap() throws Exception { **************************************************************************/ // Update configs on all active OMs and Bootstrap a new node - String newNodeId = "omNode-bootstrap-1"; - try { - cluster.bootstrapOzoneManager(newNodeId, true, false); - fail("Bootstrap should have failed as configs are not updated on" + - " all OMs."); - } catch (IOException e) { - assertEquals(OmUtils.getOMAddressListPrintString( - Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " + - "have incorrect information of the bootstrapping OM. Update their " + - "ozone-site.xml before proceeding.", e.getMessage()); - assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId + - " configuration returned null"); - assertThat(omLog.getOutput()).contains("Remote OM config check " + - "failed on OM " + downOMNodeId); - assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + - " - System Exit"); - } + final String newNodeId = "omNode-bootstrap-1"; + IOException e = + assertThrows(IOException.class, () -> cluster.bootstrapOzoneManager(newNodeId, true, false), + "Bootstrap should have failed as configs are not updated on all OMs."); + assertEquals(OmUtils.getOMAddressListPrintString( + Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " + + "have incorrect information of the bootstrapping OM. Update their " + + "ozone-site.xml before proceeding.", e.getMessage()); + assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId + + " configuration returned null"); + assertThat(omLog.getOutput()).contains("Remote OM config check " + + "failed on OM " + downOMNodeId); + assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId + + " - System Exit"); /*************************************************************************** * 2. 
Force bootstrap (with 1 node down and updated configs on rest) -> pass @@ -349,9 +338,9 @@ public void testForceBootstrap() throws Exception { omLog.clearOutput(); // Update configs on all active OMs and Force Bootstrap a new node - newNodeId = "omNode-bootstrap-2"; - cluster.bootstrapOzoneManager(newNodeId, true, true); - OzoneManager newOM = cluster.getOzoneManager(newNodeId); + String newNodeId1 = "omNode-bootstrap-2"; + cluster.bootstrapOzoneManager(newNodeId1, true, true); + OzoneManager newOM = cluster.getOzoneManager(newNodeId1); // Verify that the newly bootstrapped OM is running assertTrue(newOM.isRunning()); @@ -434,6 +423,6 @@ private void decommissionOM(String decommNodeId) throws Exception { }, 100, 100000); // Wait for new leader election if required - GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000); + cluster.waitForLeaderOM(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java index 69085830629..73596781cc6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java @@ -45,9 +45,6 @@ public class TestBucketLayoutWithOlderClient { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneClient client; /** @@ -59,13 +56,9 @@ public class TestBucketLayoutWithOlderClient { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.OBJECT_STORE.name()); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java index 0af4925dbce..37fec8dcda7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java @@ -39,7 +39,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; @@ -47,8 +46,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Test for Ozone Bucket Owner. @@ -73,13 +72,9 @@ public static void init() throws Exception { // loginUser is the user running this test. 
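// Illustrative sketch of the try/fail-to-assertThrows conversion applied in the
// surrounding hunks; Bootstrapper and its method are hypothetical stand-ins, not Ozone APIs.
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

class AssertThrowsSketch {
  interface Bootstrapper {
    void bootstrap(String nodeId) throws IOException;
  }

  void check(Bootstrapper target) {
    // assertThrows returns the thrown exception, so the message/result checks that
    // used to live in a catch block stay inline and the explicit fail(...) goes away.
    IOException e = assertThrows(IOException.class,
        () -> target.bootstrap("omNode-bootstrap-1"),
        "bootstrap should fail when peer configs are not updated");
    assertEquals("expected failure message", e.getMessage());
  }
}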
UserGroupInformation.setLoginUser(adminUser); OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); conf.setBoolean(OZONE_ACL_ENABLED, true); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); try (OzoneClient client = cluster.newClient()) { ObjectStore objectStore = client.getObjectStore(); @@ -139,65 +134,54 @@ public void testNonBucketNonVolumeOwner() throws Exception { //Key Create UserGroupInformation.setLoginUser(user3); try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - createKey(ozoneBucket, "key3", 10, new byte[10]); - fail("Create key as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + createKey(ozoneBucket, "key3", 10, new byte[10]); + }, "Create key as non-volume and non-bucket owner should fail"); } //Key Delete - should fail try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - ozoneBucket.deleteKey("key2"); - fail("Delete key as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + ozoneBucket.deleteKey("key2"); + }, "Delete key as non-volume and non-bucket owner should fail"); } //Key Rename - should fail try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - ozoneBucket.renameKey("key2", "key4"); - fail("Rename key as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + ozoneBucket.renameKey("key2", "key4"); + }, "Rename key as non-volume and non-bucket owner should fail"); } //List Keys - should fail try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - ozoneBucket.listKeys("key"); - fail("List keys as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + ozoneBucket.listKeys("key"); + }, "List keys as non-volume and non-bucket owner should fail"); } //Get Acls - should fail try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - 
ozoneBucket.getAcls(); - fail("Get Acls as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + ozoneBucket.getAcls(); + }, "Get Acls as non-volume and non-bucket owner should fail"); } + //Add Acls - should fail try (OzoneClient client = cluster.newClient()) { - OzoneVolume volume = client.getObjectStore() - .getVolume("volume1"); - OzoneBucket ozoneBucket = volume.getBucket("bucket1"); - OzoneAcl acl = new OzoneAcl(USER, "testuser1", - IAccessAuthorizer.ACLType.ALL, DEFAULT); - ozoneBucket.addAcl(acl); - fail("Add Acls as non-volume and non-bucket owner should fail"); - } catch (Exception ex) { - LOG.info(ex.getMessage()); + assertThrows(Exception.class, () -> { + OzoneVolume volume = client.getObjectStore().getVolume("volume1"); + OzoneBucket ozoneBucket = volume.getBucket("bucket1"); + OzoneAcl acl = new OzoneAcl(USER, "testuser1", + IAccessAuthorizer.ACLType.ALL, DEFAULT); + ozoneBucket.addAcl(acl); + }, "Add Acls as non-volume and non-bucket owner should fail"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 23697d9283b..67ab3169b69 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -115,7 +115,7 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; import org.apache.ratis.util.ExitUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -132,12 +132,11 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeFalse; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -381,13 +380,11 @@ public void testCreateDirectory() throws IOException { keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); writeClient.commitKey(keyArgs, keySession.getId()); - try { - writeClient.createDirectory(keyArgs); - fail("Creation should fail for directory."); - } catch (OMException e) { - assertEquals(e.getResult(), - OMException.ResultCodes.FILE_ALREADY_EXISTS); - } + OmKeyArgs finalKeyArgs = keyArgs; + OMException e = + assertThrows(OMException.class, () -> writeClient.createDirectory(finalKeyArgs), + "Creation should fail for directory."); + assertEquals(e.getResult(), OMException.ResultCodes.FILE_ALREADY_EXISTS); // create directory where parent is root keyName = 
RandomStringUtils.randomAlphabetic(5); @@ -414,13 +411,12 @@ public void testOpenFile() throws IOException { writeClient.commitKey(keyArgs, keySession.getId()); // try to open created key with overWrite flag set to false - try { - writeClient.createFile(keyArgs, false, false); - fail("Open key should fail for non overwrite create"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) { - throw ex; - } + OmKeyArgs finalKeyArgs = keyArgs; + OMException ex = + assertThrows(OMException.class, () -> writeClient.createFile(finalKeyArgs, false, false), + "Open key should fail for non overwrite create"); + if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) { + throw ex; } // create file should pass with overwrite flag set to true @@ -437,13 +433,12 @@ public void testOpenFile() throws IOException { keyArgs = createBuilder() .setKeyName(keyName) .build(); - try { - writeClient.createFile(keyArgs, false, false); - fail("Open file should fail for non recursive write"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) { - throw ex; - } + OmKeyArgs finalKeyArgs1 = keyArgs; + ex = + assertThrows(OMException.class, () -> writeClient.createFile(finalKeyArgs1, false, false), + "Open file should fail for non recursive write"); + if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) { + throw ex; } // file create should pass when recursive flag is set to true @@ -458,13 +453,11 @@ public void testOpenFile() throws IOException { keyArgs = createBuilder() .setKeyName("") .build(); - try { - writeClient.createFile(keyArgs, true, true); - fail("Open file should fail for non recursive write"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { - throw ex; - } + OmKeyArgs finalKeyArgs2 = keyArgs; + ex = assertThrows(OMException.class, () -> writeClient.createFile(finalKeyArgs2, true, true), + "Open file should fail for non recursive write"); + if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { + throw ex; } } @@ -642,7 +635,9 @@ public void testInvalidPrefixAcl() throws IOException { .build(); // add acl with invalid prefix name - writeClient.addAcl(ozInvalidPrefix, ozAcl1); + Exception ex = assertThrows(OMException.class, + () -> writeClient.addAcl(ozInvalidPrefix, ozAcl1)); + assertTrue(ex.getMessage().startsWith("Invalid prefix name")); OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -658,17 +653,22 @@ public void testInvalidPrefixAcl() throws IOException { assertEquals(ozAcl1, ozAclGet.get(0)); // get acl with invalid prefix name - Exception ex = assertThrows(OMException.class, + ex = assertThrows(OMException.class, () -> writeClient.getAcl(ozInvalidPrefix)); assertTrue(ex.getMessage().startsWith("Invalid prefix name")); // set acl with invalid prefix name List ozoneAcls = new ArrayList(); ozoneAcls.add(ozAcl1); - writeClient.setAcl(ozInvalidPrefix, ozoneAcls); + + ex = assertThrows(OMException.class, + () -> writeClient.setAcl(ozInvalidPrefix, ozoneAcls)); + assertTrue(ex.getMessage().startsWith("Invalid prefix name")); // remove acl with invalid prefix name - writeClient.removeAcl(ozInvalidPrefix, ozAcl1); + ex = assertThrows(OMException.class, + () -> writeClient.removeAcl(ozInvalidPrefix, ozAcl1)); + assertTrue(ex.getMessage().startsWith("Invalid prefix name")); } @Test @@ -732,13 +732,12 @@ public void testLookupFile() throws IOException { .build(); // lookup for a non-existent file - try { - 
keyManager.lookupFile(keyArgs, null); - fail("Lookup file should fail for non existent file"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) { - throw ex; - } + OmKeyArgs finalKeyArgs = keyArgs; + OMException ex = + assertThrows(OMException.class, () -> keyManager.lookupFile(finalKeyArgs, null), + "Lookup file should fail for non existent file"); + if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) { + throw ex; } // create a file @@ -753,13 +752,11 @@ public void testLookupFile() throws IOException { keyArgs = createBuilder() .setKeyName("") .build(); - try { - keyManager.lookupFile(keyArgs, null); - fail("Lookup file should fail for a directory"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { - throw ex; - } + OmKeyArgs finalKeyArgs1 = keyArgs; + ex = assertThrows(OMException.class, () -> keyManager.lookupFile(finalKeyArgs1, null), + "Lookup file should fail for a directory"); + if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { + throw ex; } } @@ -775,13 +772,11 @@ public void testLookupKeyWithLocation() throws IOException { .setSortDatanodesInPipeline(true) .build(); // lookup for a non-existent key - try { - keyManager.lookupKey(keyArgs, resolvedBucket(), null); - fail("Lookup key should fail for non existent key"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) { - throw ex; - } + OMException ex = + assertThrows(OMException.class, () -> keyManager.lookupKey(keyArgs, resolvedBucket(), null), + "Lookup key should fail for non existent key"); + if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) { + throw ex; } // create a key @@ -854,7 +849,7 @@ public void testLookupKeyWithLocation() throws IOException { .getLocationList().get(0).getPipeline().getNodesInOrder()); } - @NotNull + @Nonnull private ResolvedBucket resolvedBucket() { ResolvedBucket bucket = new ResolvedBucket(VOLUME_NAME, BUCKET_NAME, VOLUME_NAME, BUCKET_NAME, "", BucketLayout.DEFAULT); @@ -870,13 +865,12 @@ public void testLatestLocationVersion() throws IOException { .build(); // lookup for a non-existent key - try { - keyManager.lookupKey(keyArgs, resolvedBucket(), null); - fail("Lookup key should fail for non existent key"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) { - throw ex; - } + OmKeyArgs finalKeyArgs = keyArgs; + OMException ex = + assertThrows(OMException.class, () -> keyManager.lookupKey(finalKeyArgs, resolvedBucket(), null), + "Lookup key should fail for non existent key"); + if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) { + throw ex; } // create a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index d373eeae71a..be972557f4a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -43,7 +43,6 @@ import java.util.List; import java.util.ArrayList; import java.util.Optional; -import java.util.UUID; import java.util.stream.Stream; import static com.google.common.collect.Lists.newLinkedList; @@ -62,9 +61,6 @@ public class TestListKeys { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private 
static String omId; private static OzoneBucket legacyOzoneBucket; private static OzoneClient client; @@ -79,14 +75,10 @@ public class TestListKeys { public static void init() throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index f5d6ed75296..f499e3569c8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -44,7 +44,6 @@ import java.util.List; import java.util.ArrayList; import java.util.Optional; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -59,9 +58,6 @@ public class TestListKeysWithFSO { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneBucket legacyOzoneBucket; private static OzoneBucket fsoOzoneBucket; @@ -80,14 +76,10 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); // Set the number of keys to be processed during batch operate. 
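// Illustrative sketch of the simplified MiniOzoneCluster setup these hunks converge on:
// explicit cluster/SCM/OM IDs are no longer passed to the builder. Only calls already
// visible in the surrounding tests are used; ClusterSetupSketch itself is a placeholder.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.OzoneClient;

class ClusterSetupSketch {
  private MiniOzoneCluster cluster;
  private OzoneClient client;

  void init() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // setClusterId/setScmId/setOmId are dropped; the cluster generates its own IDs.
    cluster = MiniOzoneCluster.newBuilder(conf).build();
    cluster.waitForClusterToBeReady();
    client = cluster.newClient();
  }
}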
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -417,6 +409,11 @@ public void testListKeysWithMixOfDirsAndFiles() throws Exception { expectedKeys = getExpectedKeyList("a", "a1", legacyOzoneBucket2); checkKeyList("a", "a1", expectedKeys, fsoOzoneBucket2); + + // test when the keyPrefix = existing key + expectedKeys = + getExpectedKeyList("x/y/z/z1.tx", "", legacyOzoneBucket2); + checkKeyList("x/y/z/z1.tx", "", expectedKeys, fsoOzoneBucket2); } @Test @@ -549,6 +546,7 @@ private static void buildNameSpaceTree2(OzoneBucket ozoneBucket) keys.add("/a3/b1/c1/c1.tx"); keys.add("/x/y/z/z1.tx"); + keys.add("/x/y/z/z1.txdir/z2.tx"); keys.add("/dir1/dir2/dir3/d11.tx"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index a24e78617f7..52cb9287cc0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -33,7 +33,6 @@ import org.junit.jupiter.api.Timeout; import java.io.IOException; -import java.util.UUID; import java.util.List; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -48,9 +47,6 @@ public class TestListStatus { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneBucket fsoOzoneBucket; private static OzoneClient client; @@ -65,11 +61,7 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index f56a95d30a4..b8e11586472 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -93,8 +93,6 @@ void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) .setNumDatanodes(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 
5e3d4d77c98..68ed3536a64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -112,14 +112,14 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index d63aab63dd4..991b3a66fb0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om; import java.util.HashMap; -import java.util.UUID; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; @@ -56,22 +55,13 @@ public class TestOMEpochForNonRatis { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneClient client; @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index fdb363dbc71..f3f0c7d69b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; +import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.BucketArgs; @@ -52,7 +53,6 @@ import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.utils.FaultInjectorImpl; import 
org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.server.protocol.TermIndex; import org.assertj.core.api.Fail; import org.junit.jupiter.api.AfterEach; @@ -61,8 +61,8 @@ import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.event.Level; @@ -79,7 +79,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; @@ -109,8 +108,6 @@ public class TestOMRatisSnapshots { private MiniOzoneHAClusterImpl cluster = null; private ObjectStore objectStore; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private int numOfOMs = 3; private OzoneBucket ozoneBucket; @@ -136,8 +133,6 @@ public class TestOMRatisSnapshots { @BeforeEach public void init(TestInfo testInfo) throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP); conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16, @@ -161,8 +156,6 @@ public void init(TestInfo testInfo) throws Exception { conf.setFromObject(omRatisConf); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) @@ -198,12 +191,12 @@ public void shutdown() { } } - @ParameterizedTest - @ValueSource(ints = {100}) // tried up to 1000 snapshots and this test works, but some of the // timeouts have to be increased. - @Unhealthy("HDDS-10059") - void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws Exception { + private static final int SNAPSHOTS_TO_CREATE = 100; + + @Test + public void testInstallSnapshot(@TempDir Path tempDir) throws Exception { // Get the leader OM String leaderOMNodeId = OmFailoverProxyUtil .getFailoverProxyProvider(objectStore.getClientProxy()) @@ -231,8 +224,7 @@ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws String snapshotName = ""; List keys = new ArrayList<>(); SnapshotInfo snapshotInfo = null; - for (int snapshotCount = 0; snapshotCount < numSnapshotsToCreate; - snapshotCount++) { + for (int snapshotCount = 0; snapshotCount < SNAPSHOTS_TO_CREATE; snapshotCount++) { snapshotName = snapshotNamePrefix + snapshotCount; keys = writeKeys(keyIncrement); snapshotInfo = createOzoneSnapshot(leaderOM, snapshotName); @@ -326,7 +318,7 @@ void testInstallSnapshot(int numSnapshotsToCreate, @TempDir Path tempDir) throws private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, String snapshotName, List keys, SnapshotInfo snapshotInfo) - throws IOException { + throws IOException, RocksDBException { // Read back data from snapshot. 
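// Illustrative sketch of the live-SST filtering added in the checkSnapshot hunk just
// below: RocksDB#getLiveFiles() reports file names with a leading "/", so the prefix is
// stripped before comparing against directory listings. The db parameter is a
// hypothetical stand-in for the OM's active RocksDB handle.
import java.util.Set;
import java.util.stream.Collectors;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class LiveSstSketch {
  Set<String> liveSstFileNames(RocksDB db) throws RocksDBException {
    return db.getLiveFiles().files.stream()
        .map(name -> name.substring(1)) // drop the leading "/"
        .collect(Collectors.toSet());
  }
}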
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -347,10 +339,19 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, Path leaderActiveDir = Paths.get(leaderMetaDir.toString(), OM_DB_NAME); Path leaderSnapshotDir = Paths.get(getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo)); + + // Get list of live files on the leader. + RocksDB activeRocksDB = ((RDBStore) leaderOM.getMetadataManager().getStore()) + .getDb().getManagedRocksDb().get(); + // strip the leading "/". + Set<String> liveSstFiles = activeRocksDB.getLiveFiles().files.stream() + .map(s -> s.substring(1)) + .collect(Collectors.toSet()); + // Get the list of hardlinks from the leader. Then confirm those links // are on the follower int hardLinkCount = 0; - try (Stream<Path>list = Files.list(leaderSnapshotDir)) { + try (Stream<Path> list = Files.list(leaderSnapshotDir)) { for (Path leaderSnapshotSST: list.collect(Collectors.toList())) { String fileName = leaderSnapshotSST.getFileName().toString(); if (fileName.toLowerCase().endsWith(".sst")) { @@ -358,7 +359,8 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, Path leaderActiveSST = Paths.get(leaderActiveDir.toString(), fileName); // Skip if not hard link on the leader - if (!leaderActiveSST.toFile().exists()) { + // First confirm it is live + if (!liveSstFiles.contains(fileName)) { continue; } // If it is a hard link on the leader, it should be a hard diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java index 00b5b2a6243..354a11ad554 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java @@ -28,8 +28,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.UUID; - /** * Verifies OM startup with different layout.
*/ @@ -41,11 +39,8 @@ public class TestOMStartupWithBucketLayout { public static void startCluster(OzoneConfiguration conf) throws Exception { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).withoutDatanodes().build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .withoutDatanodes().build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index 1e657d0ea78..fa84130c9d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -104,8 +104,6 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) .setNumDatanodes(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java index 5349727ff5f..5997d5758a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java @@ -35,7 +35,7 @@ import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests to verify Object store without prefix enabled. 
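A large share of this patch is the same mechanical cleanup seen in the two files above: tests no longer generate random cluster, SCM and OM IDs and feed them to the MiniOzoneCluster builder. A minimal sketch of the resulting setup, assuming the builder now defaults those IDs internally (which is what the removed setClusterId/setScmId/setOmId calls imply):

    OzoneConfiguration conf = new OzoneConfiguration();
    // No setClusterId/setScmId/setOmId: the builder is assumed to pick defaults.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .withoutDatanodes()   // optional, as in TestOMStartupWithBucketLayout
        .build();
    cluster.waitForClusterToBeReady();
    OzoneClient client = cluster.newClient();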
@@ -44,9 +44,6 @@ public class TestObjectStore { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneClient client; /** @@ -58,11 +55,7 @@ public class TestObjectStore { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } @@ -220,14 +213,12 @@ public void testLoopInLinkBuckets() throws Exception { createLinkBucket(volume, linkBucket2Name, linkBucket3Name); createLinkBucket(volume, linkBucket3Name, linkBucket1Name); - try { - volume.getBucket(linkBucket1Name); - fail("Should throw Exception due to loop in Link Buckets"); - } catch (OMException oe) { - // Expected exception - assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, - oe.getResult()); - } + OMException oe = + assertThrows(OMException.class, () -> volume.getBucket(linkBucket1Name), + "Should throw Exception due to loop in Link Buckets"); + // Expected exception + assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, + oe.getResult()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index f9a69402466..2e58b6dbb73 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -76,6 +76,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -88,9 +89,6 @@ public class TestObjectStoreWithFSO { new Path(OZONE_URI_DELIMITER); private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static String volumeName; private static String bucketName; private static FileSystem fs; @@ -105,13 +103,9 @@ public class TestObjectStoreWithFSO { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem @@ -251,40 +245,25 @@ public void testDeleteBucketWithKeys() throws Exception { ozoneBucket.createKey(key, 10).close(); assertFalse(cluster.getOzoneManager().getMetadataManager().isBucketEmpty( 
testVolumeName, testBucketName)); - - try { - // Try to delete the bucket while a key is present under it. - ozoneVolume.deleteBucket(testBucketName); - fail("Bucket Deletion should fail, since bucket is not empty."); - } catch (IOException ioe) { - // Do nothing - } + // Try to delete the bucket while a key is present under it. + assertThrows(IOException.class, () -> ozoneVolume.deleteBucket(testBucketName), + "Bucket Deletion should fail, since bucket is not empty."); // Delete the key (this only deletes the file) ozoneBucket.deleteKey(key); assertFalse(cluster.getOzoneManager().getMetadataManager() .isBucketEmpty(testVolumeName, testBucketName)); - try { - // Try to delete the bucket while intermediate dirs are present under it. - ozoneVolume.deleteBucket(testBucketName); - fail("Bucket Deletion should fail, since bucket still contains " + - "intermediate directories"); - } catch (IOException ioe) { - // Do nothing - } + // Try to delete the bucket while intermediate dirs are present under it. + assertThrows(IOException.class, () -> ozoneVolume.deleteBucket(testBucketName), + "Bucket Deletion should fail, since bucket still contains intermediate directories"); // Delete last level of directories. ozoneBucket.deleteDirectory(parent, true); assertFalse(cluster.getOzoneManager().getMetadataManager() .isBucketEmpty(testVolumeName, testBucketName)); - try { - // Try to delete the bucket while dirs are present under it. - ozoneVolume.deleteBucket(testBucketName); - fail("Bucket Deletion should fail, since bucket still contains " + - "intermediate directories"); - } catch (IOException ioe) { - // Do nothing - } + // Try to delete the bucket while dirs are present under it. + assertThrows(IOException.class, () -> ozoneVolume.deleteBucket(testBucketName), + "Bucket Deletion should fail, since bucket still contains intermediate directories"); // Delete all the intermediate directories ozoneBucket.deleteDirectory("a/", true); @@ -331,14 +310,11 @@ public void testLookupKey() throws Exception { data.length()); // open key - try { - ozoneBucket.getKey(key); - fail("Should throw exception as fileName is not visible and its still " + - "open for writing!"); - } catch (OMException ome) { - // expected - assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); - } + OMException ome = + assertThrows(OMException.class, () -> ozoneBucket.getKey(key), + "Should throw exception as fileName is not visible and its still open for writing!"); + // expected + assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); ozoneOutputStream.close(); @@ -358,13 +334,10 @@ public void testLookupKey() throws Exception { ozoneBucket.deleteKey(key); // get deleted key - try { - ozoneBucket.getKey(key); - fail("Should throw exception as fileName not exists!"); - } catch (OMException ome) { - // expected - assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); - } + ome = assertThrows(OMException.class, () -> ozoneBucket.getKey(key), + "Should throw exception as fileName not exists!"); + // expected + assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); // after key delete verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true); @@ -658,13 +631,10 @@ public void testRenameKey() throws IOException { bucket.renameKey(fromKeyName, toKeyName); // Lookup for old key should fail. 
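The bulk of the remaining test changes replace try/fail/catch blocks with JUnit 5's assertThrows, which fails the test if nothing is thrown and hands the exception back for further assertions. A generic sketch of the pattern, with placeholder names rather than any specific test's fields:

    // Old shape: try { op(); fail("expected failure"); } catch (OMException ome) { assertEquals(...); }
    // New shape: assertThrows does the fail() bookkeeping and returns the exception.
    OMException ome = assertThrows(OMException.class,
        () -> bucket.getKey(keyName),                 // operation expected to fail
        "Lookup should fail for a missing key");      // reported only if nothing is thrown
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());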
- try { - bucket.getKey(fromKeyName); - fail("Lookup for old from key name should fail!"); - } catch (OMException ome) { - assertEquals(KEY_NOT_FOUND, ome.getResult()); - } - + OMException e = + assertThrows(OMException.class, () -> bucket.getKey(fromKeyName), + "Lookup for old from key name should fail!"); + assertEquals(KEY_NOT_FOUND, e.getResult()); OzoneKey key = bucket.getKey(toKeyName); assertEquals(toKeyName, key.getName()); } @@ -707,13 +677,10 @@ public void testRenameToAnExistingKey() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); createTestKey(bucket, keyName1, value); createTestKey(bucket, keyName2, value); - - try { - bucket.renameKey(keyName1, keyName2); - fail("Should throw exception as destin key already exists!"); - } catch (OMException e) { - assertEquals(KEY_ALREADY_EXISTS, e.getResult()); - } + OMException e = + assertThrows(OMException.class, () -> bucket.renameKey(keyName1, keyName2), + "Should throw exception as destin key already exists!"); + assertEquals(KEY_ALREADY_EXISTS, e.getResult()); } @Test @@ -770,13 +737,11 @@ public void testCreateBucketWithBucketLayout() throws Exception { } private void assertKeyRenamedEx(OzoneBucket bucket, String keyName) - throws Exception { - try { - bucket.getKey(keyName); - fail("Should throw KeyNotFound as the key got renamed!"); - } catch (OMException ome) { - assertEquals(KEY_NOT_FOUND, ome.getResult()); - } + throws Exception { + OMException ome = + assertThrows(OMException.class, () -> bucket.getKey(keyName), + "Should throw KeyNotFound as the key got renamed!"); + assertEquals(KEY_NOT_FOUND, ome.getResult()); } private void createTestKey(OzoneBucket bucket, String keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index 0b00f9b5780..be2e0a96526 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -59,6 +59,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; /** @@ -204,14 +205,11 @@ public void testMultiPartCompleteUpload() throws Exception { omBucketArgs = builder.build(); volume.createBucket(legacyBuckName, omBucketArgs); bucket = volume.getBucket(legacyBuckName); - - try { - uploadMPUWithDirectoryExists(bucket, keyName); - fail("Must throw error as there is " + - "already directory in the given path"); - } catch (OMException ome) { - assertEquals(OMException.ResultCodes.NOT_A_FILE, ome.getResult()); - } + OzoneBucket finalBucket = bucket; + OMException ome = + assertThrows(OMException.class, () -> uploadMPUWithDirectoryExists(finalBucket, keyName), + "Must throw error as there is " + "already directory in the given path"); + assertEquals(OMException.ResultCodes.NOT_A_FILE, ome.getResult()); } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java index 02ad087965d..d9724b2a490 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -78,17 +77,11 @@ public class TestOmAcls { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_ACL_ENABLED, true); conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class, IAccessAuthorizer.class); conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 5b6beebee29..50ff9c36a0a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -72,7 +72,7 @@ import org.apache.ratis.thirdparty.io.grpc.StatusException; import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException; import org.apache.ratis.util.ExitUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -102,12 +102,12 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; @@ -169,7 +169,7 @@ public static void setUp() throws Exception { metadataManager = omTestManagers.getMetadataManager(); rpcClient = new RpcClient(conf, null) { - @NotNull + @Nonnull @Override protected XceiverClientFactory createXceiverClientFactory( ServiceInfoEx serviceInfo) throws IOException { @@ -588,7 +588,7 @@ private void mockPutBlockResponse(XceiverClientSpi mockDnProtocol, .sendCommandAsync(argThat(matchCmd(Type.PutBlock))); } - @NotNull + @Nonnull private ContainerProtos.DatanodeBlockID createBlockId(long containerId, long localId) { return ContainerProtos.DatanodeBlockID.newBuilder() @@ -696,7 +696,7 @@ private void 
mockGetBlock(XceiverClientGrpc mockDnProtocol, .sendCommandAsync(argThat(matchCmd(Type.GetBlock)), any()); } - @NotNull + @Nonnull private ChunkInfo createChunkInfo(byte[] data) throws Exception { Checksum checksum = new Checksum(ChecksumType.CRC32, 4); return ChunkInfo.newBuilder() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java index 84387a232d9..dc2d6e3cf39 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java @@ -18,7 +18,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; -import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -38,9 +37,6 @@ public class TestOmInit { private static MiniOzoneCluster cluster = null; private static OMMetrics omMetrics; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; /** * Create a MiniDFSCluster for testing. @@ -52,13 +48,7 @@ public class TestOmInit { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); omMetrics = cluster.getOzoneManager().getMetrics(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 0f8c82a5580..85e7c2a76e5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -29,11 +29,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.spy; import java.io.IOException; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java index 1e6cd969542..fd1a60128de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java @@ -33,8 +33,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.UUID; - import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FEATURE_NOT_ENABLED; import static org.junit.jupiter.api.Assertions.assertEquals; 
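The matcher-import churn above (TestOMDbCheckpointServlet, TestOmContainerLocationCache, TestOmMetrics) works because org.mockito.Mockito extends org.mockito.ArgumentMatchers, so any/anyInt/eq/argThat and friends can be statically imported from Mockito alongside mock, spy and the stubbing methods. A small sketch, not tied to any Ozone class; the commented usage lines are placeholders:

    // One static-import root covers both stubbing and argument matchers.
    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.anyInt;
    import static org.mockito.Mockito.eq;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    // ... later, in a test (hypothetical collaborator):
    // when(scmClient.getContainer(anyInt())).thenReturn(someContainer);
    // verify(scmClient).getContainer(eq(42));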
@@ -53,8 +51,6 @@ public class TestOmSnapshotDisabled { @Timeout(60) public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.LEGACY.name()); conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); @@ -62,8 +58,6 @@ public static void init() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, false); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java index 6048f1e4d60..babc643ffa0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java @@ -31,8 +31,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.util.UUID; - import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; @@ -52,15 +50,11 @@ public class TestOmSnapshotDisabledRestart { @Timeout(60) public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); // Enable filesystem snapshot feature at the beginning conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId("om-service-test2") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java index 790399c3237..055ddeb20c9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java @@ -124,12 +124,8 @@ public TestOmSnapshotFileSystem(String bucketName) { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -582,7 +578,7 @@ public void testGetFileStatus() throws Exception { } @Test - public void testReadFileFromSnapshot() throws Exception { + void testReadFileFromSnapshot() throws Exception { String keyName = "dir/file"; byte[] strBytes = "Sample text".getBytes(StandardCharsets.UTF_8); Path parent = new Path("/"); @@ -612,8 +608,6 @@ public void testReadFileFromSnapshot() throws Exception { byte[] 
readBytes = new byte[strBytes.length]; System.arraycopy(buffer.array(), 0, readBytes, 0, strBytes.length); assertArrayEquals(strBytes, readBytes); - } catch (Exception e) { - fail("Failed to read file, Exception : " + e); } deleteSnapshot(snapshotName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java index 0af41af8318..77bb7189b5e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java @@ -20,7 +20,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -66,10 +65,7 @@ public void testStartupSlvLessThanMlv() throws Exception { // Create version file with MLV > SLV, which should fail the cluster build. UpgradeTestUtils.createVersionFile(omSubdir, HddsProtos.NodeType.OM, mlv); - MiniOzoneCluster.Builder clusterBuilder = MiniOzoneCluster.newBuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) - .setOmId(UUID.randomUUID().toString()); + MiniOzoneCluster.Builder clusterBuilder = MiniOzoneCluster.newBuilder(conf); OMException omException = assertThrows(OMException.class, clusterBuilder::build); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 89acf321e39..81f87265b0a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.Collection; import java.util.List; -import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -35,7 +33,6 @@ import org.apache.hadoop.ozone.ha.ConfUtils; import org.apache.hadoop.ozone.om.helpers.OMNodeDetails; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.util.LifeCycle; @@ -43,6 +40,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; @@ -60,30 +58,18 @@ public class TestOzoneManagerConfiguration { private OzoneConfiguration conf; private MiniOzoneCluster cluster; - private String omId; - private String clusterId; - private String scmId; private OzoneManager om; private OzoneManagerRatisServer omRatisServer; private static final long RATIS_RPC_TIMEOUT = 500L; @BeforeEach - public void init() throws IOException { + void init(@TempDir Path metaDirPath) throws IOException { conf = new OzoneConfiguration(); - omId = UUID.randomUUID().toString(); - clusterId = UUID.randomUUID().toString(); - scmId = 
UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omId); - Path metaDirPath = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, RATIS_RPC_TIMEOUT, TimeUnit.MILLISECONDS); - OMStorage omStore = new OMStorage(conf); - omStore.setClusterId("testClusterId"); - // writes the version file properties - omStore.initialize(); } @AfterEach @@ -95,9 +81,6 @@ public void shutdown() { private void startCluster() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .withoutDatanodes() .build(); cluster.waitForClusterToBeReady(); @@ -377,15 +360,10 @@ public void testNoOMNodes() throws Exception { String omServiceId = "service1"; conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId); // Deliberately skip OZONE_OM_NODES_KEY and OZONE_OM_ADDRESS_KEY config - - try { - startCluster(); - fail("Should have failed to start the cluster!"); - } catch (OzoneIllegalArgumentException e) { - // Expect error message - assertTrue(e.getMessage().contains( - "List of OM Node ID's should be specified")); - } + OzoneIllegalArgumentException e = + assertThrows(OzoneIllegalArgumentException.class, () -> startCluster()); + // Expect error message + assertTrue(e.getMessage().contains("List of OM Node ID's should be specified")); } /** @@ -407,15 +385,9 @@ public void testNoOMAddrs() throws Exception { conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId); conf.set(omNodesKey, omNodesKeyValue); // Deliberately skip OZONE_OM_ADDRESS_KEY config - - try { - startCluster(); - fail("Should have failed to start the cluster!"); - } catch (OzoneIllegalArgumentException e) { - // Expect error message - assertTrue(e.getMessage().contains( - "OM RPC Address should be set for all node")); - } + OzoneIllegalArgumentException e = assertThrows(OzoneIllegalArgumentException.class, () -> startCluster()); + // Expect error message + assertTrue(e.getMessage().contains("OM RPC Address should be set for all node")); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index c18d1f8b17a..454019b4a8a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -51,7 +50,6 @@ import java.util.Iterator; import java.util.UUID; import java.util.HashMap; -import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import static java.nio.charset.StandardCharsets.UTF_8; @@ -65,7 +63,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; -import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -82,9 +79,6 @@ public abstract class TestOzoneManagerHA { private static MiniOzoneCluster.Builder clusterBuilder = null; private static ObjectStore objectStore; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static String omServiceId; private static int numOfOMs = 3; private static final int LOG_PURGE_GAP = 50; @@ -150,10 +144,7 @@ public static Duration getRetryCacheDuration() { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; - omId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_ACL_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); @@ -187,10 +178,7 @@ public static void init() throws Exception { conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); clusterBuilder = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) - .setOmId(omId) .setNumOfOzoneManagers(numOfOMs); cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); @@ -217,13 +205,16 @@ public static void shutdown() { */ public static String createKey(OzoneBucket ozoneBucket) throws IOException { String keyName = "key" + RandomStringUtils.randomNumeric(5); + createKey(ozoneBucket, keyName); + return keyName; + } + + public static void createKey(OzoneBucket ozoneBucket, String keyName) throws IOException { String data = "data" + RandomStringUtils.randomNumeric(5); - OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - data.length(), ReplicationType.RATIS, + OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); - return keyName; } protected OzoneBucket setupBucket() throws Exception { @@ -460,9 +451,6 @@ protected void createKeyTest(boolean checkSuccess) throws Exception { protected void waitForLeaderToBeReady() throws InterruptedException, TimeoutException { // Wait for Leader Election timeout - int timeout = OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .toIntExact(TimeUnit.MILLISECONDS); - GenericTestUtils.waitFor(() -> - getCluster().getOMLeader() != null, 500, timeout); + cluster.waitForLeaderOM(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java index 5d9f8a0d48d..1d3ddb08a68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java @@ -68,13 +68,9 @@ public class TestOzoneManagerHASnapshot { @BeforeAll public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); 
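TestOzoneManagerHA (above) and TestOzoneManagerHASnapshot (below) stop hand-rolling leader-election waits with GenericTestUtils.waitFor/await and explicit timeouts, and call the cluster helper instead. A sketch of the before/after, assuming waitForLeaderOM() blocks until one OM reports itself leader, as the replaced polling loops did; assertNotNull is illustrative:

    // Before: poll until a leader shows up, with a locally computed timeout.
    // GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, timeoutMillis);

    // After: the MiniOzoneHAClusterImpl helper owns the waiting and the timeout.
    cluster.waitForLeaderOM();
    assertNotNull(cluster.getOMLeader());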
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId("om-service-test") .setNumOfOzoneManagers(3) .build(); @@ -123,7 +119,7 @@ public void testSnapshotDiffWhenOmLeaderRestart() cluster.shutdownOzoneManager(omLeader); cluster.restartOzoneManager(omLeader, true); - await(120_000, 100, () -> cluster.getOMLeader() != null); + cluster.waitForLeaderOM(); String newLeader = cluster.getOMLeader().getOMNodeId(); @@ -250,7 +246,7 @@ public void testSnapshotChainManagerRestore() throws Exception { cluster.shutdownOzoneManager(omLeader); cluster.restartOzoneManager(omLeader, true); - await(180_000, 100, () -> cluster.getOMLeader() != null); + cluster.waitForLeaderOM(); assertNotNull(cluster.getOMLeader()); OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) cluster .getOMLeader().getMetadataManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java index 3abcba4a9a6..1fcfa63d1a8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java @@ -25,7 +25,6 @@ import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeoutException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -47,8 +46,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; @@ -93,9 +92,6 @@ public static void setupClass() throws InterruptedException, TimeoutException, IOException { OzoneConfiguration conf = new OzoneConfiguration(); UserGroupInformation.setLoginUser(adminUser); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - String omId = UUID.randomUUID().toString(); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Use native impl here, default impl doesn't do actual checks @@ -103,7 +99,6 @@ public static void setupClass() cluster = MiniOzoneCluster.newBuilder(conf) .withoutDatanodes() - .setClusterId(clusterId).setScmId(scmId).setOmId(omId) .build(); cluster.waitForClusterToBeReady(); @@ -237,20 +232,17 @@ private static void checkUser(OzoneClient client, UserGroupInformation user, } assertEquals(5, count); } else { - try { - objectStore.listVolumes("volume"); - fail("listAllVolumes should fail for " + user.getUserName()); - } catch (RuntimeException ex) { - // Current listAllVolumes throws RuntimeException - if (ex.getCause() instanceof OMException) { - // Expect PERMISSION_DENIED - if (((OMException) ex.getCause()).getResult() != - OMException.ResultCodes.PERMISSION_DENIED) { - throw ex; - } - } else { + RuntimeException ex = + assertThrows(RuntimeException.class, () -> objectStore.listVolumes("volume")); + // Current listAllVolumes throws RuntimeException + if (ex.getCause() 
instanceof OMException) { + // Expect PERMISSION_DENIED + if (((OMException) ex.getCause()).getResult() != + OMException.ResultCodes.PERMISSION_DENIED) { throw ex; } + } else { + throw ex; } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 1c751bc99a5..41f1c14f372 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -83,7 +83,7 @@ public class TestOzoneManagerListVolumesSecure { private OzoneManager om; private static final String OM_CERT_SERIAL_ID = "9879877970576"; - private final String adminUser = "om"; + private static final String ADMIN_USER = "om"; private String adminPrincipal; private String adminPrincipalInOtherHost; private File adminKeytab; @@ -91,8 +91,8 @@ public class TestOzoneManagerListVolumesSecure { private UserGroupInformation adminUGI; private UserGroupInformation adminInOtherHostUGI; - private final String user1 = "user1"; - private final String user2 = "user2"; + private static final String USER_1 = "user1"; + private static final String USER_2 = "user2"; private String userPrincipal1; private String userPrincipal2; private File userKeytab1; @@ -142,18 +142,18 @@ private void createPrincipals() throws Exception { String host = InetAddress.getLocalHost() .getCanonicalHostName().toLowerCase(); String hostAndRealm = host + "@" + this.realm; - this.adminPrincipal = adminUser + "/" + hostAndRealm; - this.adminPrincipalInOtherHost = adminUser + "/otherhost@" + this.realm; - this.adminKeytab = new File(workDir, adminUser + ".keytab"); - this.adminKeytabInOtherHost = new File(workDir, adminUser + + this.adminPrincipal = ADMIN_USER + "/" + hostAndRealm; + this.adminPrincipalInOtherHost = ADMIN_USER + "/otherhost@" + this.realm; + this.adminKeytab = new File(workDir, ADMIN_USER + ".keytab"); + this.adminKeytabInOtherHost = new File(workDir, ADMIN_USER + "InOtherHost.keytab"); createPrincipal(this.adminKeytab, adminPrincipal); createPrincipal(this.adminKeytabInOtherHost, adminPrincipalInOtherHost); - this.userPrincipal1 = this.user1 + "/" + hostAndRealm; - this.userPrincipal2 = this.user2 + "/" + hostAndRealm; - this.userKeytab1 = new File(workDir, this.user1 + ".keytab"); - this.userKeytab2 = new File(workDir, this.user2 + ".keytab"); + this.userPrincipal1 = USER_1 + "/" + hostAndRealm; + this.userPrincipal2 = USER_2 + "/" + hostAndRealm; + this.userKeytab1 = new File(workDir, USER_1 + ".keytab"); + this.userKeytab2 = new File(workDir, USER_2 + ".keytab"); createPrincipal(this.userKeytab1, userPrincipal1); createPrincipal(this.userKeytab2, userPrincipal2); } @@ -212,12 +212,12 @@ private void setupEnvironment(boolean aclEnabled, String aclUser1All = "user:user1:a"; String aclUser2All = "user:user2:a"; String aclWorldAll = "world::a"; - createVolumeWithOwnerAndAcl(omClient, "volume1", user1, aclUser1All); - createVolumeWithOwnerAndAcl(omClient, "volume2", user2, aclUser2All); - createVolumeWithOwnerAndAcl(omClient, "volume3", user1, aclUser2All); - createVolumeWithOwnerAndAcl(omClient, "volume4", user2, aclUser1All); - createVolumeWithOwnerAndAcl(omClient, "volume5", user1, aclWorldAll); - createVolumeWithOwnerAndAcl(omClient, "volume6", adminUser, null); + createVolumeWithOwnerAndAcl(omClient, 
"volume1", USER_1, aclUser1All); + createVolumeWithOwnerAndAcl(omClient, "volume2", USER_2, aclUser2All); + createVolumeWithOwnerAndAcl(omClient, "volume3", USER_1, aclUser2All); + createVolumeWithOwnerAndAcl(omClient, "volume4", USER_2, aclUser1All); + createVolumeWithOwnerAndAcl(omClient, "volume5", USER_1, aclWorldAll); + createVolumeWithOwnerAndAcl(omClient, "volume6", ADMIN_USER, null); omClient.close(); } @@ -226,7 +226,7 @@ private void createVolumeWithOwnerAndAcl( String ownerName, String aclString) throws IOException { // Create volume use adminUgi OmVolumeArgs.Builder builder = - OmVolumeArgs.newBuilder().setVolume(volumeName).setAdminName(adminUser); + OmVolumeArgs.newBuilder().setVolume(volumeName).setAdminName(ADMIN_USER); if (!Strings.isNullOrEmpty(ownerName)) { builder.setOwnerName(ownerName); } @@ -315,9 +315,9 @@ public void testListVolumeWithOtherUsersListAllAllowed() throws Exception { // Login as user1, list other users' volumes doAs(userUGI1, () -> { - checkUser(user2, Arrays.asList("volume2", "volume3", "volume4", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); - checkUser(adminUser, Arrays + checkUser(ADMIN_USER, Arrays .asList("volume1", "volume2", "volume3", "volume4", "volume5", "volume6", "s3v"), true); return true; @@ -325,9 +325,9 @@ public void testListVolumeWithOtherUsersListAllAllowed() throws Exception { // Login as user2, list other users' volumes doAs(userUGI2, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); - checkUser(adminUser, Arrays + checkUser(ADMIN_USER, Arrays .asList("volume1", "volume2", "volume3", "volume4", "volume5", "volume6", "s3v"), true); return true; @@ -335,18 +335,18 @@ public void testListVolumeWithOtherUsersListAllAllowed() throws Exception { // Login as admin, list other users' volumes doAs(adminUGI, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); - checkUser(user2, Arrays.asList("volume2", "volume3", "volume4", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); return true; }); // Login as admin in other host, list other users' volumes doAs(adminInOtherHostUGI, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); - checkUser(user2, Arrays.asList("volume2", "volume3", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); return true; }); @@ -363,18 +363,18 @@ public void testListVolumeWithOtherUsersListAllDisallowed() throws Exception { // Login as user1, list other users' volumes, expect failure doAs(userUGI1, () -> { - checkUser(user2, Arrays.asList("volume2", "volume3", "volume4", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), false); - checkUser(adminUser, Arrays.asList("volume1", "volume2", "volume3", + checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3", "volume4", "volume5", "volume6", "s3v"), false); return true; }); // Login as user2, list other users' volumes, expect failure doAs(userUGI2, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), false); - checkUser(adminUser, + checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3", "volume4", 
"volume5", "volume6", "s3v"), false); return true; @@ -382,18 +382,18 @@ public void testListVolumeWithOtherUsersListAllDisallowed() throws Exception { // While admin should be able to list volumes just fine. doAs(adminUGI, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); - checkUser(user2, Arrays.asList("volume2", "volume3", "volume4", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); return true; }); // While admin in other host should be able to list volumes just fine. doAs(adminInOtherHostUGI, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); - checkUser(user2, Arrays.asList("volume2", "volume3", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); return true; }); @@ -405,28 +405,28 @@ public void testAclEnabledListAllAllowed() throws Exception { // Login as user1, list their own volumes doAs(userUGI1, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), true); return true; }); // Login as user2, list their own volumes doAs(userUGI2, () -> { - checkUser(user2, Arrays.asList("volume2", "volume3", "volume4", + checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), true); return true; }); // Login as admin, list their own volumes doAs(adminUGI, () -> { - checkUser(adminUser, Arrays.asList("volume1", "volume2", "volume3", + checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3", "volume4", "volume5", "volume6", "s3v"), true); return true; }); // Login as admin in other host, list their own volumes doAs(adminInOtherHostUGI, () -> { - checkUser(adminUser, Arrays.asList("volume1", "volume2", + checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3", "volume4", "volume5", "volume6", "s3v"), true); return true; }); @@ -438,12 +438,12 @@ public void testAclEnabledListAllDisallowed() throws Exception { // Login as user1, list their own volumes doAs(userUGI1, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume4", + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4", "volume5"), false); return true; }); - // Login as user2, list their own volumes + // Login as USER_2, list their own volumes doAs(userUGI2, () -> { checkUser(userPrincipal2, Arrays.asList("volume2", "volume3", "volume4", "volume5"), false); @@ -473,26 +473,26 @@ public void testAclDisabledListAllAllowed() throws Exception { // Login as user1, list their own volumes doAs(userUGI1, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume5"), + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume5"), true); return true; }); // Login as user2, list their own volumes doAs(userUGI2, () -> { - checkUser(user2, Arrays.asList("volume2", "volume4"), + checkUser(USER_2, Arrays.asList("volume2", "volume4"), true); return true; }); doAs(adminUGI, () -> { - checkUser(adminUser, Arrays.asList("volume6", "s3v"), true); + checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true); return true; }); // Login as admin in other host, list their own volumes doAs(adminInOtherHostUGI, () -> { - checkUser(adminUser, Arrays.asList("volume6", "s3v"), + checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true); return true; }); @@ -504,26 +504,26 @@ public void 
testAclDisabledListAllDisallowed() throws Exception { // Login as user1, list their own volumes doAs(userUGI1, () -> { - checkUser(user1, Arrays.asList("volume1", "volume3", "volume5"), + checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume5"), true); return true; }); // Login as user2, list their own volumes doAs(userUGI2, () -> { - checkUser(user2, Arrays.asList("volume2", "volume4"), + checkUser(USER_2, Arrays.asList("volume2", "volume4"), true); return true; }); doAs(adminUGI, () -> { - checkUser(adminUser, Arrays.asList("volume6", "s3v"), true); + checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true); return true; }); // Login as admin in other host, list their own volumes doAs(adminInOtherHostUGI, () -> { - checkUser(adminUser, Arrays.asList("volume6", "s3v"), + checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true); return true; }); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java index ac4710c0c6b..440b75aff6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.UUID; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -63,9 +62,6 @@ public class TestOzoneManagerRestart { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneClient client; /** @@ -78,9 +74,6 @@ public class TestOzoneManagerRestart { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_ACL_ENABLED, true); conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); @@ -88,9 +81,6 @@ public static void init() throws Exception { conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.OBJECT_STORE.name()); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index 4699cbb7021..6f86fcba70e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -45,15 +45,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Test recursive acl checks for delete and rename for FSO Buckets. @@ -168,23 +167,17 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { OzoneBucket ozoneBucket = volume.getBucket("bucket1"); // perform delete - try { - ozoneBucket.deleteDirectory("a/b2", true); - fail("Should throw permission denied !"); - } catch (OMException ome) { - // expect permission error - assertEquals(OMException.ResultCodes.PERMISSION_DENIED, - ome.getResult(), "Permission check failed"); - } + OMException e = + assertThrows(OMException.class, () -> ozoneBucket.deleteDirectory("a/b2", true)); + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + e.getResult(), "Permission check failed"); + // perform rename - try { - ozoneBucket.renameKey("a/b2", "a/b2_renamed"); - fail("Should throw permission denied !"); - } catch (OMException ome) { - // expect permission error - assertEquals(OMException.ResultCodes.PERMISSION_DENIED, - ome.getResult(), "Permission check failed"); - } + e = assertThrows(OMException.class, () -> ozoneBucket.renameKey("a/b2", "a/b2_renamed")); + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + e.getResult(), "Permission check failed"); // Test case 2 // Remove acl from directory c2, delete/rename a/b1 should throw @@ -200,35 +193,23 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { UserGroupInformation.setLoginUser(user2); // perform delete - try { - ozoneBucket.deleteDirectory("a/b1", true); - fail("Should throw permission denied !"); - } catch (OMException ome) { - // expect permission error - assertEquals(OMException.ResultCodes.PERMISSION_DENIED, - ome.getResult(), "Permission check failed"); - } + e = assertThrows(OMException.class, () -> ozoneBucket.deleteDirectory("a/b1", true)); + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + e.getResult(), "Permission check failed"); // perform rename - try { - ozoneBucket.renameKey("a/b1", "a/b1_renamed"); - fail("Should throw permission denied !"); - } catch (OMException ome) { - // expect permission error - assertEquals(OMException.ResultCodes.PERMISSION_DENIED, - ome.getResult(), "Permission check failed"); - } + e = assertThrows(OMException.class, () -> ozoneBucket.renameKey("a/b1", "a/b1_renamed")); + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + e.getResult(), "Permission check failed"); // Test case 3 // delete b3 and this should throw exception because user2 has no acls - try { - ozoneBucket.deleteDirectory("a/b3", true); - fail("Should throw permission denied !"); - } catch (OMException ome) { - // expect permission error - assertEquals(OMException.ResultCodes.PERMISSION_DENIED, - ome.getResult(), "Permission check failed"); - } + e = assertThrows(OMException.class, () -> ozoneBucket.deleteDirectory("a/b3", true)); + // expect permission error + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, + e.getResult(), "Permission check failed"); } } @@ -251,9 +232,6 @@ private void removeAclsFromKey(ObjectStore objectStore, private void startCluster() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - 
String omId = UUID.randomUUID().toString(); // Use native impl here, default impl doesn't do actual checks conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); @@ -262,9 +240,7 @@ private void startCluster() throws Exception { OMRequestTestUtils.configureFSOptimizedPaths(conf, true); - cluster = - MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 0a8c256b46a..14b1a30b44f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.scm.TestStorageContainerManagerHelper; +import org.apache.hadoop.hdds.scm.TestStorageContainerManagerHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -126,7 +126,7 @@ public void shutdown() { } @Test - public void testSafeModeOperations() throws Exception { + void testSafeModeOperations() throws Exception { // Create {numKeys} random names keys. TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf); @@ -148,12 +148,7 @@ public void testSafeModeOperations() throws Exception { cluster.stop(); - try { - cluster = builder.build(); - } catch (IOException e) { - fail("failed"); - } - + cluster = builder.build(); StorageContainerManager scm; @@ -179,17 +174,13 @@ public void testSafeModeOperations() throws Exception { * Tests inSafeMode & forceExitSafeMode api calls. */ @Test - public void testIsScmInSafeModeAndForceExit() throws Exception { + void testIsScmInSafeModeAndForceExit() throws Exception { // Test 1: SCM should be out of safe mode. assertFalse(storageContainerLocationClient.inSafeMode()); cluster.stop(); // Restart the cluster with same metadata dir. - try { - cluster = builder.build(); - } catch (IOException e) { - fail("Cluster startup failed."); - } + cluster = builder.build(); // Test 2: Scm should be in safe mode as datanodes are not started yet. storageContainerLocationClient = cluster @@ -212,15 +203,12 @@ public void testIsScmInSafeModeAndForceExit() throws Exception { } @Test - public void testSCMSafeMode() throws Exception { + void testSCMSafeMode() throws Exception { // Test1: Test safe mode when there are no containers in system. 
cluster.stop(); - try { - cluster = builder.build(); - } catch (IOException e) { - fail("Cluster startup failed."); - } + cluster = builder.build(); + assertTrue(cluster.getStorageContainerManager().isInSafeMode()); cluster.startHddsDatanodes(); cluster.waitForClusterToBeReady(); @@ -259,11 +247,7 @@ public void testSCMSafeMode() throws Exception { .captureLogs(SCMSafeModeManager.getLogger()); logCapturer.clearOutput(); - try { - cluster = builder.build(); - } catch (IOException ex) { - fail("failed"); - } + cluster = builder.build(); StorageContainerManager scm; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java index 8562a09be08..82d22d783f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java @@ -25,14 +25,13 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.security.OMCertificateClient; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import java.nio.file.Path; import java.nio.file.Paths; @@ -49,8 +48,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import static org.apache.ozone.test.GenericTestUtils.LogCapturer; -import static org.apache.ozone.test.GenericTestUtils.getTempPath; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -60,24 +58,19 @@ * Test secure Ozone Manager operation in distributed handler scenario. */ @Timeout(25) -public class TestSecureOzoneManager { +class TestSecureOzoneManager { private static final String COMPONENT = "om"; - private MiniOzoneCluster cluster = null; private OzoneConfiguration conf; private String clusterId; private String scmId; private String omId; + @TempDir private Path metaDir; private HddsProtos.OzoneManagerDetailsProto omInfo; - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - */ @BeforeEach - public void init() throws Exception { + void init() throws Exception { conf = new OzoneConfiguration(); clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); @@ -87,37 +80,21 @@ public void init() throws Exception { conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2); conf.set(OZONE_SCM_NAMES, "localhost"); - final String path = getTempPath(UUID.randomUUID().toString()); - metaDir = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); OzoneManager.setTestSecureOmFlag(true); omInfo = OzoneManager.getOmDetailsProto(conf, omId); } - /** - * Shutdown MiniDFSCluster. - */ - @AfterEach - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - FileUtils.deleteQuietly(metaDir.toFile()); - } - /** * Test failure cases for secure OM initialization. */ @Test - public void testSecureOmInitFailures() throws Exception { + void testSecureOmInitFailures() throws Exception { PrivateKey privateKey; PublicKey publicKey; - LogCapturer omLogs = - LogCapturer.captureLogs(OzoneManager.getLogger()); OMStorage omStorage = new OMStorage(conf); omStorage.setClusterId(clusterId); omStorage.setOmId(omId); - omLogs.clearOutput(); // Case 1: When keypair as well as certificate is missing. Initial keypair // boot-up. Get certificate will fail when SCM is not running. @@ -216,7 +193,7 @@ public void testSecureOmInitFailures() throws Exception { * Test om bind socket address. */ @Test - public void testSecureOmInitFailure() throws Exception { + void testSecureOmInitFailure() throws Exception { OzoneConfiguration config = new OzoneConfiguration(conf); OMStorage omStorage = new OMStorage(config); omStorage.setClusterId(clusterId); @@ -224,8 +201,9 @@ public void testSecureOmInitFailure() throws Exception { config.set(OZONE_OM_ADDRESS_KEY, "om-unknown"); RuntimeException rte = assertThrows(RuntimeException.class, () -> OzoneManager.initializeSecurity(config, omStorage, scmId)); - assertEquals("Can't get SCM signed certificate. omRpcAdd:" + - " om-unknown:9862", rte.getMessage()); + assertThat(rte) + .hasMessageStartingWith("Can't get SCM signed certificate. 
omRpcAdd: om-unknown") + .hasMessageEndingWith(":9862"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java index dd0af27c950..a7bc5544641 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; @@ -40,6 +39,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -48,7 +48,6 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -63,9 +62,9 @@ import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -88,10 +87,8 @@ * Tests snapshot background services. */ @Timeout(5000) -@Flaky("HDDS-9455") public class TestSnapshotBackgroundServices { - - private MiniOzoneHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster; private ObjectStore objectStore; private OzoneBucket ozoneBucket; private String volumeName; @@ -99,12 +96,12 @@ public class TestSnapshotBackgroundServices { private static final long SNAPSHOT_THRESHOLD = 50; private static final int LOG_PURGE_GAP = 50; - // This test depends on direct RocksDB checks that are easier done with OBS - // buckets. - private static final BucketLayout TEST_BUCKET_LAYOUT = - BucketLayout.OBJECT_STORE; - private static final String SNAPSHOT_NAME_PREFIX = "snapshot"; + // This test depends on direct RocksDB checks that are easier done with OBS buckets. + private static final BucketLayout TEST_BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + private static final String SNAPSHOT_NAME_PREFIX = "snapshot-"; + private static final String KEY_NAME_PREFIX = "key-"; private OzoneClient client; + private final AtomicInteger counter = new AtomicInteger(); /** * Create a MiniOzoneCluster for testing. 
The cluster initially has one @@ -114,14 +111,13 @@ public class TestSnapshotBackgroundServices { @BeforeEach public void init(TestInfo testInfo) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); String omServiceId = "om-service-test1"; + OzoneManagerRatisServerConfig omRatisConf = conf.getObject(OzoneManagerRatisServerConfig.class); + omRatisConf.setLogAppenderWaitTimeMin(10); + conf.setFromObject(omRatisConf); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP); - conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16, - StorageUnit.KB); - conf.setStorageSize(OMConfigKeys. - OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, 16, StorageUnit.KB); + conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, 16, StorageUnit.KB); + conf.setStorageSize(OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, 16, StorageUnit.KB); if ("testSSTFilteringBackgroundService".equals(testInfo.getDisplayName())) { conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); @@ -158,8 +154,6 @@ public void init(TestInfo testInfo) throws Exception { SNAPSHOT_THRESHOLD); int numOfOMs = 3; cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) @@ -176,12 +170,12 @@ public void init(TestInfo testInfo) throws Exception { client = OzoneClientFactory.getRpcClient(omServiceId, conf); objectStore = client.getObjectStore(); - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + volumeName = "volume" + counter.incrementAndGet(); + bucketName = "bucket" + counter.incrementAndGet(); VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner("user" + RandomStringUtils.randomNumeric(5)) - .setAdmin("admin" + RandomStringUtils.randomNumeric(5)) + .setOwner("user" + counter.incrementAndGet()) + .setAdmin("admin" + counter.incrementAndGet()) .build(); objectStore.createVolume(volumeName, createVolumeArgs); @@ -226,8 +220,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() cluster.getOzoneManager(leaderOM.getOMNodeId()); assertEquals(leaderOM, newFollowerOM); - SnapshotInfo newSnapshot = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo newSnapshot = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); /* Check whether newly created key data is reclaimed @@ -252,8 +245,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() assertNotNull(keyInfoA); // create snapshot b - SnapshotInfo snapshotInfoB = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoB = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); assertNotNull(snapshotInfoB); // delete key a @@ -263,8 +255,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices() () -> !isKeyInTable(keyA, omKeyInfoTable)); // create snapshot c - SnapshotInfo snapshotInfoC = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoC = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); // get snapshot c OmSnapshot snapC; @@ -281,8 +272,7 @@ public void 
testSnapshotAndKeyDeletionBackgroundServices() () -> isKeyInTable(keyA, snapC.getMetadataManager().getDeletedTable())); // create snapshot d - SnapshotInfo snapshotInfoD = createOzoneSnapshot(newLeaderOM, - SNAPSHOT_NAME_PREFIX + RandomStringUtils.randomNumeric(5)); + SnapshotInfo snapshotInfoD = createOzoneSnapshot(newLeaderOM, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); // delete snapshot c client.getObjectStore() @@ -535,18 +525,14 @@ public void testSSTFilteringBackgroundService() private void confirmSnapDiffForTwoSnapshotsDifferingBySingleKey( OzoneManager ozoneManager) throws IOException, InterruptedException, TimeoutException { - String firstSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(10)).getName(); + String firstSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()) + .getName(); String diffKey = writeKeys(1).get(0); - String secondSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(10)).getName(); - SnapshotDiffReportOzone diff = getSnapDiffReport(volumeName, bucketName, - firstSnapshot, secondSnapshot); + String secondSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()) + .getName(); + SnapshotDiffReportOzone diff = getSnapDiffReport(volumeName, bucketName, firstSnapshot, secondSnapshot); assertEquals(Collections.singletonList( - SnapshotDiffReportOzone.getDiffReportEntry( - SnapshotDiffReport.DiffType.CREATE, diffKey, null)), + SnapshotDiffReportOzone.getDiffReportEntry(SnapshotDiffReport.DiffType.CREATE, diffKey, null)), diff.getDiffList()); } @@ -576,9 +562,7 @@ private static File getSstBackupDir(OzoneManager ozoneManager) { private void checkIfSnapshotGetsProcessedBySFS(OzoneManager ozoneManager) throws IOException, TimeoutException, InterruptedException { writeKeys(1); - SnapshotInfo newSnapshot = createOzoneSnapshot(ozoneManager, - TestSnapshotBackgroundServices.SNAPSHOT_NAME_PREFIX + - RandomStringUtils.randomNumeric(5)); + SnapshotInfo newSnapshot = createOzoneSnapshot(ozoneManager, SNAPSHOT_NAME_PREFIX + counter.incrementAndGet()); assertNotNull(newSnapshot); Table snapshotInfoTable = ozoneManager.getMetadataManager().getSnapshotInfoTable(); @@ -642,22 +626,17 @@ private SnapshotDiffReportOzone getSnapDiffReport(String volume, return response.get().getSnapshotDiffReport(); } - private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) - throws IOException { + private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) throws IOException { objectStore.createSnapshot(volumeName, bucketName, name); - String tableKey = SnapshotInfo.getTableKey(volumeName, - bucketName, - name); + String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, name); SnapshotInfo snapshotInfo = leaderOM.getMetadataManager() .getSnapshotInfoTable() .get(tableKey); // Allow the snapshot to be written to disk - String fileName = - getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); File snapshotDir = new File(fileName); - if (!RDBCheckpointUtils - .waitForCheckpointDirectoryExist(snapshotDir)) { + if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("snapshot directory doesn't exist"); } return snapshotInfo; @@ -667,7 +646,9 @@ private List writeKeys(long keyCount) throws 
IOException { List keys = new ArrayList<>(); long index = 0; while (index < keyCount) { - keys.add(createKey(ozoneBucket)); + String key = KEY_NAME_PREFIX + counter.incrementAndGet(); + createKey(ozoneBucket, key); + keys.add(key); index++; } return keys; @@ -681,5 +662,4 @@ private void readKeys(List keys) throws IOException { inputStream.close(); } } - } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 12844c23cd7..6e3e4fd7f40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -147,7 +146,6 @@ public void testSnapshotSplitAndMove() throws Exception { } @Test - @Flaky("HDDS-9288") public void testMultipleSnapshotKeyReclaim() throws Exception { Table deletedTable = @@ -197,7 +195,6 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { } @SuppressWarnings("checkstyle:MethodLength") - @Flaky("HDDS-9023") @Test public void testSnapshotWithFSO() throws Exception { Table dirTable = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index 161d1b755a9..abc21ed4351 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -77,9 +77,10 @@ import static org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; @@ -277,7 +278,7 @@ private void createRoleHelper(Role role) throws IOException { rolesCreated.add(0, role.getName()); } - private void createRolesAndPoliciesInRanger(boolean populateDB) { + private void createRolesAndPoliciesInRanger(boolean populateDB) throws IOException { policiesCreated.clear(); rolesCreated.clear(); @@ -299,102 +300,75 @@ private void createRolesAndPoliciesInRanger(boolean populateDB) { // Add tenant entry in OM DB if (populateDB) { LOG.info("Creating OM DB tenant entries"); - try { - // Tenant State entry - omMetadataManager.getTenantStateTable().put(tenantId, - new OmDBTenantState( - tenantId, volumeName, userRoleName, adminRoleName, - bucketNamespacePolicyName, bucketPolicyName)); - // Access ID entry for alice - final 
String aliceAccessId = OMMultiTenantManager.getDefaultAccessId( - tenantId, USER_ALICE_SHORT); - omMetadataManager.getTenantAccessIdTable().put(aliceAccessId, - new OmDBAccessIdInfo.Builder() - .setTenantId(tenantId) - .setUserPrincipal(USER_ALICE_SHORT) - .setIsAdmin(false) - .setIsDelegatedAdmin(false) - .build()); - // Access ID entry for bob - final String bobAccessId = OMMultiTenantManager.getDefaultAccessId( - tenantId, USER_BOB_SHORT); - omMetadataManager.getTenantAccessIdTable().put(bobAccessId, - new OmDBAccessIdInfo.Builder() - .setTenantId(tenantId) - .setUserPrincipal(USER_BOB_SHORT) - .setIsAdmin(false) - .setIsDelegatedAdmin(false) - .build()); - } catch (IOException e) { - fail(e.getMessage()); - } + // Tenant State entry + omMetadataManager.getTenantStateTable().put(tenantId, + new OmDBTenantState( + tenantId, volumeName, userRoleName, adminRoleName, + bucketNamespacePolicyName, bucketPolicyName)); + // Access ID entry for alice + final String aliceAccessId = OMMultiTenantManager.getDefaultAccessId( + tenantId, USER_ALICE_SHORT); + omMetadataManager.getTenantAccessIdTable().put(aliceAccessId, + new OmDBAccessIdInfo.Builder() + .setTenantId(tenantId) + .setUserPrincipal(USER_ALICE_SHORT) + .setIsAdmin(false) + .setIsDelegatedAdmin(false) + .build()); + // Access ID entry for bob + final String bobAccessId = OMMultiTenantManager.getDefaultAccessId( + tenantId, USER_BOB_SHORT); + omMetadataManager.getTenantAccessIdTable().put(bobAccessId, + new OmDBAccessIdInfo.Builder() + .setTenantId(tenantId) + .setUserPrincipal(USER_BOB_SHORT) + .setIsAdmin(false) + .setIsDelegatedAdmin(false) + .build()); } - try { - LOG.info("Creating user in Ranger: {}", USER_ALICE_SHORT); - rangerUserRequest.createUser(USER_ALICE_SHORT, "Password12"); - usersCreated.add(USER_ALICE_SHORT); - } catch (IOException e) { - fail(e.getMessage()); - } - try { - LOG.info("Creating user in Ranger: {}", USER_BOB_SHORT); - rangerUserRequest.createUser(USER_BOB_SHORT, "Password12"); - usersCreated.add(USER_BOB_SHORT); - } catch (IOException e) { - fail(e.getMessage()); - } + LOG.info("Creating user in Ranger: {}", USER_ALICE_SHORT); + rangerUserRequest.createUser(USER_ALICE_SHORT, "Password12"); + usersCreated.add(USER_ALICE_SHORT); - try { - LOG.info("Creating admin role in Ranger: {}", adminRoleName); - // Create empty admin role first - Role adminRole = new Role.Builder() - .setName(adminRoleName) - .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION) - .build(); - createRoleHelper(adminRole); - } catch (IOException e) { - fail(e.getMessage()); - } + LOG.info("Creating user in Ranger: {}", USER_BOB_SHORT); + rangerUserRequest.createUser(USER_BOB_SHORT, "Password12"); + usersCreated.add(USER_BOB_SHORT); - try { - LOG.info("Creating user role in Ranger: {}", userRoleName); - Role userRole = new Role.Builder() - .setName(userRoleName) - .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION) - .addRole(adminRoleName, true) - // Add alice and bob to the user role - .addUsers(Arrays.asList(USER_ALICE_SHORT, USER_BOB_SHORT)) - .build(); - createRoleHelper(userRole); - } catch (IOException e) { - fail(e.getMessage()); - } - - try { - Policy tenant1VolumeAccessPolicy = - OMMultiTenantManager.getDefaultVolumeAccessPolicy( - tenantId, volumeName, userRoleName, adminRoleName); - LOG.info("Creating VolumeAccess policy in Ranger: {}", - tenant1VolumeAccessPolicy.getName()); - accessController.createPolicy(tenant1VolumeAccessPolicy); - policiesCreated.add(tenant1VolumeAccessPolicy.getName()); - } catch (IOException e) { - 
fail(e.getMessage()); - } - - try { - Policy tenant1BucketCreatePolicy = - OMMultiTenantManager.getDefaultBucketAccessPolicy( - tenantId, volumeName, userRoleName); - LOG.info("Creating BucketAccess policy in Ranger: {}", - tenant1BucketCreatePolicy.getName()); - accessController.createPolicy(tenant1BucketCreatePolicy); - policiesCreated.add(tenant1BucketCreatePolicy.getName()); - } catch (IOException e) { - fail(e.getMessage()); - } + LOG.info("Creating admin role in Ranger: {}", adminRoleName); + // Create empty admin role first + Role adminRole = new Role.Builder() + .setName(adminRoleName) + .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION) + .build(); + createRoleHelper(adminRole); + + LOG.info("Creating user role in Ranger: {}", userRoleName); + Role userRole = new Role.Builder() + .setName(userRoleName) + .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION) + .addRole(adminRoleName, true) + // Add alice and bob to the user role + .addUsers(Arrays.asList(USER_ALICE_SHORT, USER_BOB_SHORT)) + .build(); + createRoleHelper(userRole); + + Policy tenant1VolumeAccessPolicy = + OMMultiTenantManager.getDefaultVolumeAccessPolicy( + tenantId, volumeName, userRoleName, adminRoleName); + LOG.info("Creating VolumeAccess policy in Ranger: {}", + tenant1VolumeAccessPolicy.getName()); + accessController.createPolicy(tenant1VolumeAccessPolicy); + policiesCreated.add(tenant1VolumeAccessPolicy.getName()); + + Policy tenant1BucketCreatePolicy = + OMMultiTenantManager.getDefaultBucketAccessPolicy( + tenantId, volumeName, userRoleName); + LOG.info("Creating BucketAccess policy in Ranger: {}", + tenant1BucketCreatePolicy.getName()); + accessController.createPolicy(tenant1BucketCreatePolicy); + policiesCreated.add(tenant1BucketCreatePolicy.getName()); } public void cleanupPolicies() { @@ -487,31 +461,15 @@ public void testRemovePolicyAndRole() throws Exception { // by OzoneManager Multi-Tenancy tables are cleaned up by sync thread for (String policy : policiesCreated) { - try { - final Policy policyRead = accessController.getPolicy(policy); - fail("The policy should have been deleted: " + policyRead); - } catch (IOException ex) { - if (!(ex.getCause() instanceof RangerServiceException)) { - fail("Expected RangerServiceException, got " + - ex.getCause().getClass().getSimpleName()); - } - RangerServiceException rse = (RangerServiceException) ex.getCause(); - assertEquals(404, rse.getStatus().getStatusCode()); - } + IOException ex = assertThrows(IOException.class, () -> accessController.getPolicy(policy)); + RangerServiceException rse = assertInstanceOf(RangerServiceException.class, ex.getCause()); + assertEquals(404, rse.getStatus().getStatusCode()); } for (String roleName : rolesCreated) { - try { - final Role role = accessController.getRole(roleName); - fail("This role should have been deleted from Ranger: " + role); - } catch (IOException ex) { - if (!(ex.getCause() instanceof RangerServiceException)) { - fail("Expected RangerServiceException, got " + - ex.getCause().getClass().getSimpleName()); - } - RangerServiceException rse = (RangerServiceException) ex.getCause(); - assertEquals(400, rse.getStatus().getStatusCode()); - } + IOException ex = assertThrows(IOException.class, () -> accessController.getRole(roleName)); + RangerServiceException rse = assertInstanceOf(RangerServiceException.class, ex.getCause()); + assertEquals(400, rse.getStatus().getStatusCode()); } } @@ -521,7 +479,7 @@ public void testRemovePolicyAndRole() throws Exception { * Expect sync service to check Ranger state but write 
nothing to Ranger. */ @Test - public void testConsistentState() throws Exception { + void testConsistentState() throws Exception { long startingRangerVersion = initBGSync(); // Create roles and policies in ranger that are @@ -549,23 +507,13 @@ public void testConsistentState() throws Exception { } for (String policyName : policiesCreated) { - try { - final Policy policyRead = accessController.getPolicy(policyName); - assertEquals(policyName, policyRead.getName()); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } + final Policy policyRead = accessController.getPolicy(policyName); + assertEquals(policyName, policyRead.getName()); } for (String roleName : rolesCreated) { - try { - final Role roleResponse = accessController.getRole(roleName); - assertEquals(roleName, roleResponse.getName()); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } + final Role roleResponse = accessController.getRole(roleName); + assertEquals(roleName, roleResponse.getName()); } } @@ -641,7 +589,7 @@ public void testRecoverRangerRole() throws Exception { * Expect sync service to recover both policies to their default states. */ @Test - public void testRecreateDeletedRangerPolicy() throws Exception { + void testRecreateDeletedRangerPolicy() throws Exception { long startingRangerVersion = initBGSync(); // Create roles and policies in ranger that are @@ -676,23 +624,13 @@ public void testRecreateDeletedRangerPolicy() throws Exception { assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore); for (String policyName : policiesCreated) { - try { - final Policy policyRead = accessController.getPolicy(policyName); - assertEquals(policyName, policyRead.getName()); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } + final Policy policyRead = accessController.getPolicy(policyName); + assertEquals(policyName, policyRead.getName()); } for (String roleName : rolesCreated) { - try { - final Role roleRead = accessController.getRole(roleName); - assertEquals(roleName, roleRead.getName()); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } + final Role roleRead = accessController.getRole(roleName); + assertEquals(roleName, roleRead.getName()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 6d8bc353eb1..029b0813bb5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -87,8 +87,7 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Slow; -import org.apache.ozone.test.tag.Unhealthy; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; @@ -197,8 +196,6 @@ public TestOmSnapshot(BucketLayout newBucketLayout, */ private void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); 
conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); @@ -212,12 +209,8 @@ private void init() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setNumOfOzoneManagers(3) - .setOmLayoutVersion(OMLayoutFeature. - BUCKET_LAYOUT_SUPPORT.layoutVersion()) - .setOmId(UUID.randomUUID().toString()) + .setOmLayoutVersion(OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()) .build(); cluster.waitForClusterToBeReady(); @@ -279,7 +272,7 @@ private static void assertFinalizationException(OMException omException) { * Trigger OM upgrade finalization from the client and block until completion * (status FINALIZATION_DONE). */ - private void finalizeOMUpgrade() throws IOException { + private void finalizeOMUpgrade() throws Exception { // Trigger OM upgrade finalization. Ref: FinalizeUpgradeSubCommand#call final OzoneManagerProtocol omClient = client.getObjectStore() .getClientProxy().getOzoneManagerClient(); @@ -291,17 +284,12 @@ private void finalizeOMUpgrade() throws IOException { assertTrue(isStarting(finalizationResponse.status())); // Wait for the finalization to be marked as done. // 10s timeout should be plenty. - try { - await(POLL_MAX_WAIT_MILLIS, POLL_INTERVAL_MILLIS, () -> { - final UpgradeFinalizer.StatusAndMessages progress = - omClient.queryUpgradeFinalizationProgress( - upgradeClientID, false, false); - return isDone(progress.status()); - }); - } catch (Exception e) { - fail("Unexpected exception while waiting for " - + "the OM upgrade to finalize: " + e.getMessage()); - } + await(POLL_MAX_WAIT_MILLIS, POLL_INTERVAL_MILLIS, () -> { + final UpgradeFinalizer.StatusAndMessages progress = + omClient.queryUpgradeFinalizationProgress( + upgradeClientID, false, false); + return isDone(progress.status()); + }); } @AfterAll @@ -1784,9 +1772,7 @@ public void testListSnapshotDiffWithInvalidParameters() * sst filtering code path. 
*/ @Test - @Unhealthy("HDDS-8005") - public void testSnapDiffWithMultipleSSTs() - throws Exception { + public void testSnapDiffWithMultipleSSTs() throws Exception { // Create a volume and 2 buckets String volumeName1 = "vol-" + counter.incrementAndGet(); String bucketName1 = "buck1"; @@ -1800,29 +1786,27 @@ public void testSnapDiffWithMultipleSSTs() String keyPrefix = "key-"; // add file to bucket1 and take snapshot createFileKeyWithPrefix(bucket1, keyPrefix); + int keyTableSize = getKeyTableSstFiles().size(); String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap1); // 1.sst - assertEquals(1, getKeyTableSstFiles().size()); + assertEquals(1, (getKeyTableSstFiles().size() - keyTableSize)); // add files to bucket2 and flush twice to create 2 sst files for (int i = 0; i < 5; i++) { createFileKeyWithPrefix(bucket2, keyPrefix); } flushKeyTable(); // 1.sst 2.sst - assertEquals(2, getKeyTableSstFiles().size()); + assertEquals(2, (getKeyTableSstFiles().size() - keyTableSize)); for (int i = 0; i < 5; i++) { createFileKeyWithPrefix(bucket2, keyPrefix); } flushKeyTable(); // 1.sst 2.sst 3.sst - assertEquals(3, getKeyTableSstFiles().size()); + assertEquals(3, (getKeyTableSstFiles().size() - keyTableSize)); // add a file to bucket1 and take second snapshot createFileKeyWithPrefix(bucket1, keyPrefix); String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap2); // 1.sst 2.sst 3.sst 4.sst - assertEquals(4, getKeyTableSstFiles().size()); - SnapshotDiffReportOzone diff1 = - store.snapshotDiff(volumeName1, bucketName1, snap1, snap2, - null, 0, forceFullSnapshotDiff, disableNativeDiff) - .getSnapshotDiffReport(); + assertEquals(4, (getKeyTableSstFiles().size() - keyTableSize)); + SnapshotDiffReportOzone diff1 = getSnapDiffReport(volumeName1, bucketName1, snap1, snap2); assertEquals(1, diff1.getDiffList().size()); } @@ -1949,7 +1933,7 @@ public void testSnapshotQuotaHandling() throws Exception { assertEquals(buckUsedBytesIntial, bucket1.getUsedBytes()); } - @NotNull + @Nonnull private List getKeyTableSstFiles() throws IOException { if (!bucketLayout.isFileSystemOptimized()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 60e66923524..5ed2f848aed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -18,16 +18,21 @@ package org.apache.hadoop.ozone.om.snapshot; +import org.apache.ozone.test.tag.Native; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Timeout; +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; /** * Test OmSnapshot for FSO bucket type when native lib is enabled. 
*/ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @Timeout(300) -public class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { - public TestOmSnapshotFsoWithNativeLib() throws Exception { +@Unhealthy("HDDS-10149") +class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { + TestOmSnapshotFsoWithNativeLib() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index efe854dbc35..8c0b375c3ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -21,7 +21,6 @@ import java.io.File; import java.io.IOException; -import java.util.UUID; import java.util.stream.Stream; import org.apache.hadoop.hdds.utils.IOUtils; @@ -97,7 +96,8 @@ public class TestOzoneManagerSnapshotAcl { private static OzoneClient client; private String volumeName; private String bucketName; - private static final String KEY_PREFIX = "key-"; + private static final String DIR_PREFIX = "dir1/"; + private static final String KEY_PREFIX = DIR_PREFIX + "key-"; private String keyName; private String snapshotKeyPrefix; @@ -111,8 +111,6 @@ public static void init() throws Exception { + RandomStringUtils.randomNumeric(32); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .build(); @@ -382,6 +380,69 @@ public void testGetAclWithNotAllowedUser(BucketLayout bucketLayout) assertDoesNotThrow(() -> ozoneManager.getAcl(keyObj)); } + @ParameterizedTest + @EnumSource(BucketLayout.class) + public void testLookupKeyWithAllowedUserForPrefixAcl(BucketLayout bucketLayout) throws Exception { + UserGroupInformation.setLoginUser(UGI1); + + createVolume(); + + final OzoneVolume volume = objectStore.getVolume(volumeName); + createBucket(bucketLayout, volume); + + final OzoneBucket bucket = volume.getBucket(bucketName); + + setDefaultPrefixAcls(); + + createKey(bucket); + + setDefaultVolumeAcls(); + setDefaultBucketAcls(); + + createSnapshot(); + + final OmKeyArgs snapshotKeyArgs = getOmKeyArgs(true); + assertDoesNotThrow(() -> ozoneManager.lookupKey(snapshotKeyArgs)); + } + + @ParameterizedTest + @EnumSource(BucketLayout.class) + public void testLookupKeyWithNotAllowedUserForPrefixAcl(BucketLayout bucketLayout) throws Exception { + UserGroupInformation.setLoginUser(UGI1); + + createVolume(); + + final OzoneVolume volume = objectStore.getVolume(volumeName); + createBucket(bucketLayout, volume); + + final OzoneBucket bucket = volume.getBucket(bucketName); + + setDefaultPrefixAcls(); + + createKey(bucket); + + setDefaultVolumeAcls(); + setDefaultBucketAcls(); + + createSnapshot(); + + final OmKeyArgs snapshotKeyArgs = getOmKeyArgs(true); + + // Add user2 to bucket and prefix ACL + setBucketAcl(); + setPrefixAcls(); + + createKey(bucket); + final OmKeyArgs keyArgs = getOmKeyArgs(false); + + UserGroupInformation.setLoginUser(UGI2); + final OMException ex = assertThrows(OMException.class, () -> ozoneManager.lookupKey(snapshotKeyArgs)); + assertEquals(OMException.ResultCodes.PERMISSION_DENIED, ex.getResult()); + + assertDoesNotThrow(() -> ozoneManager.lookupKey(keyArgs)); 
+ } + + private void setup(BucketLayout bucketLayout) throws IOException { UserGroupInformation.setLoginUser(UGI1); @@ -403,6 +464,12 @@ private void setup(BucketLayout bucketLayout) } private void setDefaultAcls() throws IOException { + setDefaultVolumeAcls(); + setDefaultBucketAcls(); + setDefaultKeyAcls(); + } + + private void setDefaultVolumeAcls() throws IOException { final OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder() .setResType(OzoneObj.ResourceType.VOLUME) .setStoreType(OzoneObj.StoreType.OZONE) @@ -410,8 +477,10 @@ private void setDefaultAcls() throws IOException { .build(); objectStore.setAcl(volumeObj, OzoneAcl.parseAcls( "user:" + USER1 + ":r," + - "user:" + USER2 + ":r")); + "user:" + USER2 + ":r")); + } + private void setDefaultBucketAcls() throws IOException { final OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder() .setResType(OzoneObj.ResourceType.BUCKET) .setStoreType(OzoneObj.StoreType.OZONE) @@ -420,8 +489,10 @@ private void setDefaultAcls() throws IOException { .build(); objectStore.setAcl(bucketObj, OzoneAcl.parseAcls( "user:" + USER1 + ":r," + - "user:" + USER1 + ":l")); + "user:" + USER1 + ":l")); + } + private void setDefaultKeyAcls() throws IOException { final OzoneObj keyObj = OzoneObjInfo.Builder.newBuilder() .setResType(RESOURCE_TYPE_KEY) .setStoreType(OzoneObj.StoreType.OZONE) @@ -431,7 +502,36 @@ private void setDefaultAcls() throws IOException { .build(); objectStore.setAcl(keyObj, OzoneAcl.parseAcls( "user:" + USER1 + ":r," + - "user:" + USER1 + ":x")); + "user:" + USER1 + ":x")); + } + + private void setDefaultPrefixAcls() throws IOException { + final OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(DIR_PREFIX) + .build(); + + objectStore.setAcl(prefixObj, OzoneAcl.parseAcls( + "user:" + USER1 + ":r[DEFAULT]," + + "user:" + USER1 + ":x[DEFAULT]")); + } + + private void setBucketAcl() throws IOException { + OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder() + .setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OzoneObj.StoreType.OZONE) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .build(); + + objectStore.setAcl(bucketObj, OzoneAcl.parseAcls( + "user:" + USER1 + ":r," + + "user:" + USER1 + ":l," + + "user:" + USER2 + ":r," + + "user:" + USER2 + ":l")); } private void setKeyAcl() throws IOException { @@ -449,8 +549,23 @@ private void setKeyAcl() throws IOException { "user:" + USER2 + ":x")); } - private void createKey(OzoneBucket bucket) - throws IOException { + private void setPrefixAcls() throws IOException { + final OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(DIR_PREFIX) + .build(); + + objectStore.setAcl(prefixObj, OzoneAcl.parseAcls( + "user:" + USER1 + ":r[DEFAULT]," + + "user:" + USER1 + ":x[DEFAULT]," + + "user:" + USER2 + ":r[DEFAULT]," + + "user:" + USER2 + ":x[DEFAULT]")); + } + + private void createKey(OzoneBucket bucket) throws IOException { keyName = KEY_PREFIX + RandomStringUtils.randomNumeric(32); byte[] data = RandomStringUtils.randomAscii(1).getBytes(UTF_8); final OzoneOutputStream fileKey = bucket.createKey(keyName, data.length); @@ -480,21 +595,6 @@ private void createSnapshot() } } - private void setBucketAcl() throws IOException { - OzoneObj bucketObj = 
OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build(); - - objectStore.setAcl(bucketObj, OzoneAcl.parseAcls( - "user:" + USER1 + ":r," + - "user:" + USER1 + ":l," + - "user:" + USER2 + ":r," + - "user:" + USER2 + ":l")); - } - private static Stream getListStatusArguments() { return Stream.of( arguments(BucketLayout.OBJECT_STORE, false, false), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index 091471fab17..643191b36d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -18,8 +18,6 @@ package org.apache.hadoop.ozone.om.snapshot; -import java.util.UUID; - import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; @@ -54,8 +52,6 @@ public class TestOzoneManagerSnapshotProvider { private MiniOzoneHAClusterImpl cluster = null; private ObjectStore objectStore; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private int numOfOMs = 3; @@ -67,14 +63,10 @@ public class TestOzoneManagerSnapshotProvider { @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index 21fdd853b9e..e0d01c148d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -53,7 +53,6 @@ import java.io.File; import java.io.IOException; import java.util.Iterator; -import java.util.UUID; import java.util.concurrent.TimeoutException; import java.util.stream.Stream; import java.util.concurrent.atomic.AtomicInteger; @@ -104,13 +103,9 @@ public void init() throws Exception { // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); String serviceID = OM_SERVICE_ID + RandomStringUtils.randomNumeric(5); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(serviceID) .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index 9902187be66..8f11941fcbf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -46,6 +45,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test Ozone OM and SCM HA Ratis log parser. @@ -61,14 +63,10 @@ class TestOzoneHARatisLogParser { @BeforeEach void setup() throws Exception { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); String omServiceId = "omServiceId1"; OzoneConfiguration conf = new OzoneConfiguration(); String scmServiceId = "scmServiceId"; cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(3) @@ -113,20 +111,20 @@ void testRatisLogParsing() throws Exception { File omMetaDir = new File(ozoneConfiguration.get(OZONE_METADATA_DIRS), "ratis"); - Assertions.assertTrue(omMetaDir.isDirectory()); + assertThat(omMetaDir).isDirectory(); String[] ratisDirs = omMetaDir.list(); - Assertions.assertNotNull(ratisDirs); - Assertions.assertEquals(1, ratisDirs.length); + assertNotNull(ratisDirs); + assertEquals(1, ratisDirs.length); File groupDir = new File(omMetaDir, ratisDirs[0]); - Assertions.assertNotNull(groupDir); - Assertions.assertTrue(groupDir.isDirectory()); + assertNotNull(groupDir); + assertThat(groupDir).isDirectory(); File currentDir = new File(groupDir, "current"); File logFile = new File(currentDir, "log_inprogress_0"); GenericTestUtils.waitFor(logFile::exists, 100, 15000); - Assertions.assertTrue(logFile.isFile()); + assertThat(logFile).isFile(); OMRatisLogParser omRatisLogParser = new OMRatisLogParser(); omRatisLogParser.setSegmentFile(logFile); @@ -135,27 +133,26 @@ void testRatisLogParsing() throws Exception { // Not checking total entry count, because of not sure of exact count of // metadata entry changes. - Assertions.assertTrue(out.toString(UTF_8.name()) - .contains("Num Total Entries:")); + assertThat(out.toString(UTF_8.name())).contains("Num Total Entries:"); out.reset(); // Now check for SCM. 
File scmMetadataDir = new File(SCMHAUtils.getRatisStorageDir(leaderSCMConfig)); - Assertions.assertTrue(scmMetadataDir.isDirectory()); + assertThat(scmMetadataDir).isDirectory(); ratisDirs = scmMetadataDir.list(); - Assertions.assertNotNull(ratisDirs); - Assertions.assertEquals(1, ratisDirs.length); + assertNotNull(ratisDirs); + assertEquals(1, ratisDirs.length); groupDir = new File(scmMetadataDir, ratisDirs[0]); - Assertions.assertNotNull(groupDir); - Assertions.assertTrue(groupDir.isDirectory()); + assertNotNull(groupDir); + assertThat(groupDir).isDirectory(); currentDir = new File(groupDir, "current"); logFile = new File(currentDir, "log_inprogress_1"); GenericTestUtils.waitFor(logFile::exists, 100, 15000); - Assertions.assertTrue(logFile.isFile()); + assertThat(logFile).isFile(); SCMRatisLogParser scmRatisLogParser = new SCMRatisLogParser(); scmRatisLogParser.setSegmentFile(logFile); @@ -163,7 +160,6 @@ void testRatisLogParsing() throws Exception { // Not checking total entry count, because of not sure of exact count of // metadata entry changes. - Assertions.assertTrue(out.toString(UTF_8.name()) - .contains("Num Total Entries:")); + assertThat(out.toString(UTF_8.name())).contains("Num Total Entries:"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java new file mode 100644 index 00000000000..7691704d924 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -0,0 +1,489 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmUtils; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata; +import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig; +import org.apache.hadoop.hdds.scm.node.TestNodeUtil; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.LambdaTestUtils; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.event.Level; + +import java.io.IOException; +import java.io.OutputStream; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static 
java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyMap; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Integration tests for ensuring Recon's consistency + * with the "ozone admin container" CLI. + */ +@Timeout(300) +class TestReconAndAdminContainerCLI { + + private static final Logger LOG = LoggerFactory.getLogger(TestReconAndAdminContainerCLI.class); + + private static final OzoneConfiguration CONF = new OzoneConfiguration(); + private static ScmClient scmClient; + private static MiniOzoneCluster cluster; + private static NodeManager scmNodeManager; + private static long containerIdR3; + private static OzoneBucket ozoneBucket; + private static ContainerManager scmContainerManager; + private static ContainerManager reconContainerManager; + + private static Stream outOfServiceNodeStateArgs() { + return Stream.of( + Arguments.of(NodeOperationalState.ENTERING_MAINTENANCE, + NodeOperationalState.IN_MAINTENANCE, true), + Arguments.of(NodeOperationalState.DECOMMISSIONING, + NodeOperationalState.DECOMMISSIONED, false) + ); + } + + @BeforeAll + static void init() throws Exception { + setupConfigKeys(); + cluster = MiniOzoneCluster.newBuilder(CONF) + .setNumDatanodes(5) + .includeRecon(true) + .build(); + cluster.waitForClusterToBeReady(); + GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG); + + scmClient = new ContainerOperationClient(CONF); + StorageContainerManager scm = cluster.getStorageContainerManager(); + PipelineManager scmPipelineManager = scm.getPipelineManager(); + scmContainerManager = scm.getContainerManager(); + scmNodeManager = scm.getScmNodeManager(); + + ReconStorageContainerManagerFacade reconScm = + (ReconStorageContainerManagerFacade) + cluster.getReconServer().getReconStorageContainerManager(); + PipelineManager reconPipelineManager = reconScm.getPipelineManager(); + reconContainerManager = reconScm.getContainerManager(); + + LambdaTestUtils.await(60000, 5000, + () -> (reconPipelineManager.getPipelines().size() >= 4)); + + // Verify that Recon has all the pipelines from SCM. 
+ scmPipelineManager.getPipelines().forEach(p -> { + Pipeline pipeline = assertDoesNotThrow(() -> reconPipelineManager.getPipeline(p.getId())); + assertNotNull(pipeline); + }); + + assertThat(scmContainerManager.getContainers()).isEmpty(); + + // Verify that all nodes are registered with Recon. + NodeManager reconNodeManager = reconScm.getScmNodeManager(); + assertEquals(scmNodeManager.getAllNodes().size(), + reconNodeManager.getAllNodes().size()); + + OzoneClient client = cluster.newClient(); + String volumeName = "vol1"; + String bucketName = "bucket1"; + + ozoneBucket = TestDataUtil.createVolumeAndBucket( + client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + String keyNameR3 = "key1"; + containerIdR3 = setupRatisKey(keyNameR3, + HddsProtos.ReplicationFactor.THREE); + } + + @AfterAll + static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + /** + * The missing-container behavior is the same regardless of the ReplicationConfig, + * but it's easier to test with Ratis ONE. + */ + @Test + void testMissingContainer() throws Exception { + String keyNameR1 = "key2"; + long containerID = setupRatisKey(keyNameR1, + HddsProtos.ReplicationFactor.ONE); + + Pipeline pipeline = + scmClient.getContainerWithPipeline(containerID).getPipeline(); + + for (DatanodeDetails details : pipeline.getNodes()) { + cluster.shutdownHddsDatanode(details); + } + TestHelper.waitForReplicaCount(containerID, 0, cluster); + + GenericTestUtils.waitFor(() -> { + try { + return scmClient.getReplicationManagerReport() + .getStat(ReplicationManagerReport.HealthState.MISSING) == 1; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 20000); + + UnHealthyContainerStates containerStateForTesting = + UnHealthyContainerStates.MISSING; + compareRMReportToReconResponse(containerStateForTesting); + + for (DatanodeDetails details : pipeline.getNodes()) { + cluster.restartHddsDatanode(details, false); + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, details, IN_SERVICE); + } + } + + @ParameterizedTest + @MethodSource("outOfServiceNodeStateArgs") + void testNodesInDecommissionOrMaintenance( + NodeOperationalState initialState, NodeOperationalState finalState, + boolean isMaintenance) throws Exception { + Pipeline pipeline = + scmClient.getContainerWithPipeline(containerIdR3).getPipeline(); + + List<DatanodeDetails> details = + pipeline.getNodes().stream() + .filter(d -> d.getPersistedOpState().equals(IN_SERVICE)) + .collect(Collectors.toList()); + + final DatanodeDetails nodeToGoOffline1 = details.get(0); + final DatanodeDetails nodeToGoOffline2 = details.get(1); + + UnHealthyContainerStates underReplicatedState = + UnHealthyContainerStates.UNDER_REPLICATED; + UnHealthyContainerStates overReplicatedState = + UnHealthyContainerStates.OVER_REPLICATED; + + // First node goes offline. + if (isMaintenance) { + scmClient.startMaintenanceNodes(Collections.singletonList( + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0); + } else { + scmClient.decommissionNodes(Collections.singletonList( + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1))); + } + + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline1, initialState); + + compareRMReportToReconResponse(underReplicatedState); + compareRMReportToReconResponse(overReplicatedState); + + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline1, finalState); + // Every time a node is decommissioned, + // a new copy of its replicas is made on another node. + // For maintenance, no replica copy is made in this case.
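+ // So the wait for a fourth replica below only applies to the decommission path.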
+ if (!isMaintenance) { + TestHelper.waitForReplicaCount(containerIdR3, 4, cluster); + } + + compareRMReportToReconResponse(underReplicatedState); + compareRMReportToReconResponse(overReplicatedState); + + // Second node goes offline. + if (isMaintenance) { + scmClient.startMaintenanceNodes(Collections.singletonList( + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0); + } else { + scmClient.decommissionNodes(Collections.singletonList( + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2))); + } + + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline2, initialState); + + compareRMReportToReconResponse(underReplicatedState); + compareRMReportToReconResponse(overReplicatedState); + + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline2, finalState); + + // There will be a replica copy for both maintenance and decommission. + // maintenance 3 -> 4, decommission 4 -> 5. + int expectedReplicaNum = isMaintenance ? 4 : 5; + TestHelper.waitForReplicaCount(containerIdR3, expectedReplicaNum, cluster); + + compareRMReportToReconResponse(underReplicatedState); + compareRMReportToReconResponse(overReplicatedState); + + scmClient.recommissionNodes(Arrays.asList( + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1), + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2))); + + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline1, IN_SERVICE); + TestNodeUtil.waitForDnToReachOpState(scmNodeManager, + nodeToGoOffline2, IN_SERVICE); + + TestNodeUtil.waitForDnToReachPersistedOpState(nodeToGoOffline1, IN_SERVICE); + TestNodeUtil.waitForDnToReachPersistedOpState(nodeToGoOffline2, IN_SERVICE); + + compareRMReportToReconResponse(underReplicatedState); + compareRMReportToReconResponse(overReplicatedState); + } + + /** + * The purpose of this method isn't to validate the numbers, + * but to make sure that they are consistent between + * Recon and the ReplicationManager. + */ + private static void compareRMReportToReconResponse(UnHealthyContainerStates containerState) + throws Exception { + assertNotNull(containerState); + + // Both threads are running every 1 second. + // Wait until all values are equal. + GenericTestUtils.waitFor(() -> assertReportsMatch(containerState), + 1000, 40000); + } + + private static boolean assertReportsMatch(UnHealthyContainerStates state) { + ReplicationManagerReport rmReport; + UnhealthyContainersResponse reconResponse; + + try { + rmReport = scmClient.getReplicationManagerReport(); + reconResponse = TestReconEndpointUtil + .getUnhealthyContainersFromRecon(CONF, state); + + assertEquals(rmReport.getStat(HealthState.MISSING), reconResponse.getMissingCount()); + assertEquals(rmReport.getStat(HealthState.UNDER_REPLICATED), reconResponse.getUnderReplicatedCount()); + assertEquals(rmReport.getStat(HealthState.OVER_REPLICATED), reconResponse.getOverReplicatedCount()); + assertEquals(rmReport.getStat(HealthState.MIS_REPLICATED), reconResponse.getMisReplicatedCount()); + } catch (IOException e) { + LOG.info("Error getting report", e); + return false; + } catch (AssertionError e) { + LOG.info("Reports do not match (yet): {}", e.getMessage()); + return false; + } + + // Recon's UnhealthyContainersResponse contains a list of containers + // for a particular state. Check if RM's sample of containers can be + // found in Recon's list of containers for a particular state.
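+ // Pick the RM health state matching the requested Recon state only when Recon reports at least one such container; otherwise keep the UNHEALTHY default.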
+ HealthState rmState = HealthState.UNHEALTHY; + + if (state.equals(UnHealthyContainerStates.MISSING) && + reconResponse.getMissingCount() > 0) { + rmState = HealthState.MISSING; + } else if (state.equals(UnHealthyContainerStates.UNDER_REPLICATED) && + reconResponse.getUnderReplicatedCount() > 0) { + rmState = HealthState.UNDER_REPLICATED; + } else if (state.equals(UnHealthyContainerStates.OVER_REPLICATED) && + reconResponse.getOverReplicatedCount() > 0) { + rmState = HealthState.OVER_REPLICATED; + } else if (state.equals(UnHealthyContainerStates.MIS_REPLICATED) && + reconResponse.getMisReplicatedCount() > 0) { + rmState = HealthState.MIS_REPLICATED; + } + + List<ContainerID> rmContainerIDs = rmReport.getSample(rmState); + List<Long> rmIDsToLong = new ArrayList<>(); + for (ContainerID id : rmContainerIDs) { + rmIDsToLong.add(id.getId()); + } + List<Long> reconContainerIDs = + reconResponse.getContainers() + .stream() + .map(UnhealthyContainerMetadata::getContainerID) + .collect(Collectors.toList()); + assertThat(reconContainerIDs).containsAll(rmIDsToLong); + + return true; + } + + private static long setupRatisKey(String keyName, + HddsProtos.ReplicationFactor replicationFactor) throws Exception { + OmKeyInfo omKeyInfo = createTestKey(keyName, + RatisReplicationConfig.getInstance(replicationFactor)); + + // Sync Recon with OM, to force it to get the new key entries. + TestReconEndpointUtil.triggerReconDbSyncWithOm(CONF); + + List<Long> containerIDs = getContainerIdsForKey(omKeyInfo); + // The list has only 1 containerID. + assertEquals(1, containerIDs.size()); + long containerID = containerIDs.get(0); + + // Verify Recon picked up the new container. + assertEquals(scmContainerManager.getContainers(), + reconContainerManager.getContainers()); + + ReconContainerMetadataManager reconContainerMetadataManager = + cluster.getReconServer().getReconContainerMetadataManager(); + + // Verify Recon picked up the new keys and + // updated its container key mappings.
+ GenericTestUtils.waitFor(() -> { + try { + return reconContainerMetadataManager + .getKeyCountForContainer(containerID) > 0; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 20000); + + return containerID; + } + + private static OmKeyInfo createTestKey(String keyName, + ReplicationConfig replicationConfig) + throws IOException { + byte[] textBytes = "Testing".getBytes(UTF_8); + try (OutputStream out = ozoneBucket.createKey(keyName, + textBytes.length, replicationConfig, emptyMap())) { + out.write(textBytes); + } + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(ozoneBucket.getVolumeName()) + .setBucketName(ozoneBucket.getName()) + .setKeyName(keyName) + .build(); + return cluster.getOzoneManager().lookupKey(keyArgs); + } + + private static List<Long> getContainerIdsForKey(OmKeyInfo omKeyInfo) { + assertNotNull(omKeyInfo.getLatestVersionLocations()); + List<OmKeyLocationInfo> locations = + omKeyInfo.getLatestVersionLocations().getLocationList(); + + List<Long> ids = new ArrayList<>(); + for (OmKeyLocationInfo location : locations) { + ids.add(location.getContainerID()); + } + return ids; + } + + private static void setupConfigKeys() { + CONF.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + 100, TimeUnit.MILLISECONDS); + CONF.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); + CONF.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + CONF.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); + CONF.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); + CONF.setTimeDuration(OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL, + 1, SECONDS); + CONF.setTimeDuration( + ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL, + 1, SECONDS); + CONF.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, + 0, SECONDS); + CONF.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); + CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); + CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); + + CONF.setTimeDuration(HDDS_RECON_HEARTBEAT_INTERVAL, + 1, TimeUnit.SECONDS); + CONF.setTimeDuration(OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY, + 1, TimeUnit.SECONDS); + + CONF.set(ScmUtils.getContainerReportConfPrefix() + + ".queue.wait.threshold", "1"); + CONF.set(ScmUtils.getContainerReportConfPrefix() + + ".execute.wait.threshold", "1"); + + ReconTaskConfig reconTaskConfig = CONF.getObject(ReconTaskConfig.class); + reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(1)); + CONF.setFromObject(reconTaskConfig); + + ReplicationManager.ReplicationManagerConfiguration replicationConf = + CONF.getObject(ReplicationManager + .ReplicationManagerConfiguration.class); + replicationConf.setInterval(Duration.ofSeconds(1)); + replicationConf.setUnderReplicatedInterval(Duration.ofSeconds(1)); + replicationConf.setOverReplicatedInterval(Duration.ofSeconds(1)); + CONF.setFromObject(replicationConf); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java new file mode 100644 index 00000000000..002de94cb02 --- /dev/null +++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.recon; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.http.HttpConfig; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.InputStream; +import java.net.ConnectException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.nio.charset.StandardCharsets; + +import static java.net.HttpURLConnection.HTTP_CREATED; +import static java.net.HttpURLConnection.HTTP_OK; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTPS_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy; +import static org.apache.hadoop.http.HttpServer2.HTTPS_SCHEME; +import static org.apache.hadoop.http.HttpServer2.HTTP_SCHEME; + +/** + * Utility class, used by integration tests, + * for getting responses from Recon Endpoints. 
+ */ +public final class TestReconEndpointUtil { + + private static final Logger LOG = + LoggerFactory.getLogger(TestReconEndpointUtil.class); + + private static final String CONTAINER_ENDPOINT = "/api/v1/containers"; + private static final String OM_DB_SYNC_ENDPOINT = "/api/v1/triggerdbsync/om"; + + private TestReconEndpointUtil() { + } + + public static void triggerReconDbSyncWithOm( + OzoneConfiguration conf) { + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)) + .append(OM_DB_SYNC_ENDPOINT); + + String response = ""; + try { + response = makeHttpCall(conf, urlBuilder); + } catch (Exception e) { + LOG.error("Error getting db sync response from Recon"); + } + + if (!Strings.isNullOrEmpty(response) && + !response.equals("true")) { + LOG.error("Triggering Recon DB sync with OM failed."); + } + } + + public static UnhealthyContainersResponse getUnhealthyContainersFromRecon( + OzoneConfiguration conf, ContainerSchemaDefinition.UnHealthyContainerStates containerState) + throws JsonProcessingException { + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)) + .append(CONTAINER_ENDPOINT) + .append("/unhealthy/") + .append(containerState); + + String containersResponse = ""; + try { + containersResponse = makeHttpCall(conf, urlBuilder); + } catch (Exception e) { + LOG.error("Error getting unhealthy containers response from Recon"); + } + + final ObjectMapper objectMapper = new ObjectMapper(); + + return objectMapper.readValue(containersResponse, + UnhealthyContainersResponse.class); + } + + public static String makeHttpCall(OzoneConfiguration conf, StringBuilder url) + throws Exception { + + System.out.println("Connecting to Recon: " + url + " ..."); + final URLConnectionFactory connectionFactory = + URLConnectionFactory.newDefaultURLConnectionFactory(conf); + + boolean isSpnegoEnabled = isHTTPSEnabled(conf); + HttpURLConnection httpURLConnection; + + try { + httpURLConnection = (HttpURLConnection) connectionFactory.openConnection( + new URL(url.toString()), isSpnegoEnabled); + httpURLConnection.connect(); + int errorCode = httpURLConnection.getResponseCode(); + InputStream inputStream = httpURLConnection.getInputStream(); + + if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { + return IOUtils.toString(inputStream, StandardCharsets.UTF_8); + } + + if (httpURLConnection.getErrorStream() != null) { + System.out.println("Recon is being initialized. " + + "Please wait a moment"); + return null; + } else { + System.out.println("Unexpected null in http payload," + + " while processing request"); + } + return null; + } catch (ConnectException ex) { + System.err.println("Connection Refused. 
Please make sure the " + + "Recon Server has been started."); + return null; + } + } + + public static String getReconWebAddress(OzoneConfiguration conf) { + final String protocol; + final HttpConfig.Policy webPolicy = getHttpPolicy(conf); + + final boolean isHostDefault; + String host; + + if (webPolicy.isHttpsEnabled()) { + protocol = HTTPS_SCHEME; + host = conf.get(OZONE_RECON_HTTPS_ADDRESS_KEY, + OZONE_RECON_HTTPS_ADDRESS_DEFAULT); + isHostDefault = getHostOnly(host).equals( + getHostOnly(OZONE_RECON_HTTPS_ADDRESS_DEFAULT)); + } else { + protocol = HTTP_SCHEME; + host = conf.get(OZONE_RECON_HTTP_ADDRESS_KEY, + OZONE_RECON_HTTP_ADDRESS_DEFAULT); + isHostDefault = getHostOnly(host).equals( + getHostOnly(OZONE_RECON_HTTP_ADDRESS_DEFAULT)); + } + + if (isHostDefault) { + // Fallback to : + final String rpcHost = + conf.get(OZONE_RECON_ADDRESS_KEY, OZONE_RECON_ADDRESS_DEFAULT); + host = getHostOnly(rpcHost) + ":" + getPort(host); + } + + return protocol + "://" + host; + } + + public static String getHostOnly(String host) { + return host.split(":", 2)[0]; + } + + public static String getPort(String host) { + return host.split(":", 2)[1]; + } + + public static boolean isHTTPSEnabled(OzoneConfiguration conf) { + return getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY; + } + +} + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 74751dde6de..4c059be1b54 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -47,7 +47,6 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,6 +61,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; /** @@ -132,7 +134,7 @@ public void cleanup() { fs.delete(fileStatus.getPath(), true); } } catch (IOException ex) { - Assertions.fail("Failed to cleanup files."); + fail("Failed to cleanup files."); } } @@ -201,7 +203,7 @@ public void testGetDeletedDirectoryInfo() } if (directoryObjectId == null) { - Assertions.fail("directoryObjectId is null. Test case cannot proceed."); + fail("directoryObjectId is null. Test case cannot proceed."); } else { // Retrieve Namespace Summary for dir1 from Recon. ReconNamespaceSummaryManagerImpl namespaceSummaryManager = @@ -210,8 +212,8 @@ public void testGetDeletedDirectoryInfo() NSSummary summary = namespaceSummaryManager.getNSSummary(directoryObjectId); // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes. 
- Assertions.assertEquals(10, summary.getNumOfFiles()); - Assertions.assertEquals(10, summary.getSizeOfFiles()); + assertEquals(10, summary.getNumOfFiles()); + assertEquals(10, summary.getSizeOfFiles()); } // Delete the entire directory dir1. @@ -238,7 +240,7 @@ public void testGetDeletedDirectoryInfo() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 10. - Assertions.assertEquals(10, entity.getUnreplicatedDataSize()); + assertEquals(10, entity.getUnreplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -327,7 +329,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 3. - Assertions.assertEquals(3, entity.getUnreplicatedDataSize()); + assertEquals(3, entity.getUnreplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -368,7 +370,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() fs.delete(rootDir, true); // Verify that the directory is deleted - Assertions.assertFalse(fs.exists(rootDir), "Directory was not deleted"); + assertFalse(fs.exists(rootDir), "Directory was not deleted"); // Sync data from Ozone Manager to Recon. syncDataFromOM(); @@ -389,7 +391,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() KeyInsightInfoResponse entity = (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 100. - Assertions.assertEquals(100, entity.getUnreplicatedDataSize()); + assertEquals(100, entity.getUnreplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -471,7 +473,7 @@ private boolean assertTableRowCount(int expectedCount, LOG.info("{} actual row count={}, expectedCount={}", table.getName(), count, expectedCount); } catch (IOException ex) { - Assertions.fail("Test failed with: " + ex); + fail("Test failed with: " + ex); } return count == expectedCount; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 721f04085b0..8baad9cb97b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -44,7 +44,6 @@ import org.junit.jupiter.api.Timeout; import java.util.HashMap; -import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; import static java.nio.charset.StandardCharsets.UTF_8; @@ -75,8 +74,6 @@ public void setup() throws Exception { conf.setFromObject(dbConf); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(OM_SERVICE_ID) .setNumDatanodes(1) .setNumOfOzoneManagers(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java index c0b3d7d5413..c3a38f3b5ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java @@ -33,6 
+33,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX; +import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_STREAMS_LIMIT_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -49,6 +50,7 @@ void reconfigurableProperties() { Set expected = ImmutableSet.builder() .add(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX) .add(OZONE_BLOCK_DELETING_SERVICE_WORKERS) + .add(REPLICATION_STREAMS_LIMIT_KEY) .addAll(new DatanodeConfiguration().reconfigurableProperties()) .build(); @@ -91,6 +93,20 @@ void blockDeletingServiceWorkers(int delta) throws ReconfigurationException { assertEquals(newValue, executor.getCorePoolSize()); } + @ParameterizedTest + @ValueSource(ints = { -1, +1 }) + void replicationStreamsLimit(int delta) throws ReconfigurationException { + ThreadPoolExecutor executor = + getFirstDatanode().getDatanodeStateMachine().getContainer() + .getReplicationServer().getExecutor(); + int newValue = executor.getCorePoolSize() + delta; + + getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl( + REPLICATION_STREAMS_LIMIT_KEY, String.valueOf(newValue)); + assertEquals(newValue, executor.getMaximumPoolSize()); + assertEquals(newValue, executor.getCorePoolSize()); + } + private HddsDatanodeService getFirstDatanode() { return getCluster().getHddsDatanodes().get(0); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/package-info.java deleted file mode 100644 index d5563a43bfa..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java deleted file mode 100644 index a71f981fd50..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * Utility classes to encode/decode DTO objects to/from byte array. - */ - -/** - * Unit tests for Pipeline related functions in SCM. - */ -package org.apache.hadoop.ozone.scm.pipeline; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index e74041ceafb..9d0552a169f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.shell; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -34,7 +35,6 @@ import org.apache.hadoop.ozone.admin.scm.GetFailedDeletedBlocksTxnSubcommand; import org.apache.hadoop.ozone.admin.scm.ResetDeletedBlockRetryCountSubcommand; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -52,15 +52,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; -import java.util.UUID; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test for DeletedBlocksTxnSubcommand Cli. 
@@ -74,8 +74,6 @@ public class TestDeletedBlocksTxnShell { private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String scmServiceId; private File txnFile; private int numOfSCMs = 3; @@ -93,16 +91,12 @@ public class TestDeletedBlocksTxnShell { @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); scmServiceId = "scm-service-test1"; conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs) @@ -129,9 +123,8 @@ public void shutdown() { //> private Map> generateData(int dataSize) throws Exception { Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); + int continerIDBase = RandomUtils.nextInt(0, 100); + int localIDBase = RandomUtils.nextInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID); @@ -194,7 +187,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(30, currentValidTxnNum); + assertEquals(30, currentValidTxnNum); // let the first 20 txns be failed List txIds = new ArrayList<>(); @@ -208,7 +201,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(10, currentValidTxnNum); + assertEquals(10, currentValidTxnNum); ContainerOperationClient scmClient = new ContainerOperationClient(conf); CommandLine cmd; @@ -224,12 +217,12 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { while (m.find()) { matchCount += 1; } - Assertions.assertEquals(20, matchCount); + assertEquals(20, matchCount); // print the first 10 failed txns info into file cmd.parseArgs("-o", txnFile.getAbsolutePath(), "-c", "10"); getCommand.execute(scmClient); - Assertions.assertTrue(txnFile.exists()); + assertThat(txnFile).exists(); ResetDeletedBlockRetryCountSubcommand resetCommand = new ResetDeletedBlockRetryCountSubcommand(); @@ -241,7 +234,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(20, currentValidTxnNum); + assertEquals(20, currentValidTxnNum); // reset the given txIds list cmd.parseArgs("-l", "11,12,13,14,15"); @@ -249,7 +242,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(25, currentValidTxnNum); + assertEquals(25, currentValidTxnNum); // reset the non-existing txns and valid txns, should do nothing cmd.parseArgs("-l", "1,2,3,4,5,100,101,102,103,104,105"); @@ -257,7 +250,7 @@ public void 
testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(25, currentValidTxnNum); + assertEquals(25, currentValidTxnNum); // reset all the result expired txIds, all transactions should be available cmd.parseArgs("-a"); @@ -265,6 +258,6 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(30, currentValidTxnNum); + assertEquals(30, currentValidTxnNum); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java index c07a48b1058..c9e6e13806e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java @@ -38,8 +38,7 @@ import java.util.UUID; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; /** * Test for Namespace CLI. @@ -116,9 +115,9 @@ public void testNSSummaryCLIRoot() throws UnsupportedEncodingException { String path = "/"; executeAdminCommands(path); // Should throw warning - only buckets can have bucket layout. - assertTrue(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); - assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); - assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); + assertThat(getOutContentString()).contains("[Warning] Namespace CLI is not designed for OBS bucket layout."); + assertThat(getOutContentString()).contains("Put more files into it to visualize DU"); + assertThat(getOutContentString()).contains("Put more files into it to visualize file size distribution"); } /** @@ -130,9 +129,10 @@ public void testNSSummaryCLIFSO() throws UnsupportedEncodingException { String path = "/" + volumeName + "/" + bucketFSO; executeAdminCommands(path); // Should not throw warning, since bucket is in FSO bucket layout. - assertFalse(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); - assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); - assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); + assertThat(getOutContentString()) + .doesNotContain("[Warning] Namespace CLI is not designed for OBS bucket layout."); + assertThat(getOutContentString()).contains("Put more files into it to visualize DU"); + assertThat(getOutContentString()).contains("Put more files into it to visualize file size distribution"); } /** @@ -144,9 +144,9 @@ public void testNSSummaryCLIOBS() throws UnsupportedEncodingException { String path = "/" + volumeName + "/" + bucketOBS; executeAdminCommands(path); // Should throw warning, since bucket is in OBS bucket layout. 
- assertTrue(getOutContentString().contains("[Warning] Namespace CLI is not designed for OBS bucket layout.")); - assertTrue(getOutContentString().contains("Put more files into it to visualize DU")); - assertTrue(getOutContentString().contains("Put more files into it to visualize file size distribution")); + assertThat(getOutContentString()).contains("[Warning] Namespace CLI is not designed for OBS bucket layout."); + assertThat(getOutContentString()).contains("Put more files into it to visualize DU"); + assertThat(getOutContentString()).contains("Put more files into it to visualize file size distribution"); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index 5f2380e97e9..a79e2de245d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -45,7 +45,6 @@ import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -72,6 +71,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test Ozone Container upgrade shell. @@ -80,8 +81,6 @@ public class TestOzoneContainerUpgradeShell { private static final Logger LOG = LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class); private static String omServiceId; - private static String clusterId; - private static String scmId; private static MiniOzoneCluster cluster = null; private static OzoneClient client; private static OzoneConfiguration conf = null; @@ -91,11 +90,10 @@ public class TestOzoneContainerUpgradeShell { protected static void startCluster() throws Exception { // Init HA cluster omServiceId = "om-service-test-upgrade-container1"; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); final int numDNs = 3; - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOMServiceId(omServiceId).setNumDatanodes(numDNs) + cluster = MiniOzoneCluster.newBuilder(conf) + .setOMServiceId(omServiceId) + .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -157,7 +155,7 @@ public void testNormalContainerUpgrade() throws Exception { String[] args = new String[]{"upgrade", "--yes"}; int exitCode = commandLine.execute(args); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); // datanode2 NodeOperationalState is IN_SERVICE upgrade fail. 
OzoneConfiguration datanode2Conf = datanodeConfigs.get(1); @@ -169,9 +167,9 @@ public void testNormalContainerUpgrade() throws Exception { String[] args2 = new String[]{"upgrade", "--yes"}; int exit2Code = commandLine2.execute(args2); - Assertions.assertEquals(0, exit2Code); + assertEquals(0, exit2Code); String cmdOut = stdout2.toString(); - Assertions.assertTrue(cmdOut.contains("IN_MAINTENANCE")); + assertThat(cmdOut).contains("IN_MAINTENANCE"); } private CommandLine upgradeCommand(PrintWriter pstdout) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java index df668bf44c7..c40e2e009b6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java @@ -35,8 +35,8 @@ import picocli.CommandLine.ParseResult; import picocli.CommandLine.RunLast; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * This test class specified for testing Ozone datanode shell command. @@ -90,19 +90,13 @@ private void executeDatanodeWithError(HddsDatanodeService hdds, String[] args, if (Strings.isNullOrEmpty(expectedError)) { executeDatanode(hdds, args); } else { - try { - executeDatanode(hdds, args); - fail("Exception is expected from command execution " + Arrays.asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - assertTrue(exceptionToCheck.getMessage().contains(expectedError), - String.format("Error of shell code doesn't contain the " + "exception [%s] in [%s]", expectedError, - exceptionToCheck.getMessage())); + Exception ex = assertThrows(Exception.class, () -> executeDatanode(hdds, args)); + if (!Strings.isNullOrEmpty(expectedError)) { + Throwable exceptionToCheck = ex; + if (exceptionToCheck.getCause() != null) { + exceptionToCheck = exceptionToCheck.getCause(); } + assertThat(exceptionToCheck.getMessage()).contains(expectedError); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index e89e1217fb4..b50cea759ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -69,6 +68,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** 
* Test Ozone Debug shell. @@ -76,8 +77,6 @@ public class TestOzoneDebugShell { private static String omServiceId; - private static String clusterId; - private static String scmId; private static MiniOzoneCluster cluster = null; private static OzoneClient client; @@ -87,12 +86,8 @@ public class TestOzoneDebugShell { protected static void startCluster() throws Exception { // Init HA cluster omServiceId = "om-service-test1"; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumDatanodes(numDNs) .build(); @@ -127,12 +122,12 @@ public void testChunkInfoCmdBeforeAfterCloseContainer() throws Exception { writeKey(volumeName, bucketName, keyName); int exitCode = runChunkInfoCommand(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); closeContainerForKey(volumeName, bucketName, keyName); exitCode = runChunkInfoCommand(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); } @Test @@ -142,7 +137,7 @@ public void testChunkInfoVerifyPathsAreDifferent() throws Exception { final String keyName = UUID.randomUUID().toString(); writeKey(volumeName, bucketName, keyName); int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); } @Test @@ -163,7 +158,7 @@ public void testLdbCliForOzoneSnapshot() throws Exception { OzoneSnapshot snapshot = client.getObjectStore().listSnapshot(volumeName, bucketName, null, null) .next(); - Assertions.assertEquals(snapshotName, snapshot.getName()); + assertEquals(snapshotName, snapshot.getName()); String dbPath = getSnapshotDBPath(snapshot.getCheckpointDir()); String snapshotCurrent = dbPath + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils @@ -171,9 +166,9 @@ public void testLdbCliForOzoneSnapshot() throws Exception { String[] args = new String[] {"--db=" + dbPath, "scan", "--cf", "keyTable"}; int exitCode = cmd.execute(args); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); String cmdOut = stdout.toString(); - Assertions.assertTrue(cmdOut.contains(keyName)); + assertThat(cmdOut).contains(keyName); } private static String getSnapshotDBPath(String checkPointDir) { @@ -233,7 +228,7 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, // DN storage directories are set differently for each DN // in MiniOzoneCluster as datanode-0,datanode-1,datanode-2 which is why // we expect 3 paths here in the set. 
- Assertions.assertEquals(3, blockFilePaths.size()); + assertEquals(3, blockFilePaths.size()); } return exitCode; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 628a101ea96..6eb89265982 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -30,12 +30,17 @@ import java.util.UUID; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.kms.KMSClientProvider; +import org.apache.hadoop.crypto.key.kms.server.MiniKMS; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.client.ReplicationType; @@ -61,7 +66,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.ozone.om.TrashPolicyOzone; import com.google.common.base.Strings; @@ -81,13 +85,15 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; @@ -125,6 +131,8 @@ public class TestOzoneShellHA { private static File testFile; private static String testFilePathString; private static MiniOzoneCluster cluster = null; + private static File testDir; + private static MiniKMS miniKMS; private static OzoneClient client; private OzoneShell ozoneShell = null; private OzoneAdmin ozoneAdminShell = null; @@ -135,8 +143,6 @@ public class TestOzoneShellHA { private static final PrintStream OLD_ERR = System.err; private static String omServiceId; - private static String clusterId; - private static String scmId; private static int numOfOMs; /** @@ -149,9 +155,20 @@ public class TestOzoneShellHA { public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); + startKMS(); startCluster(conf); } + protected static void startKMS() throws Exception { + testDir = GenericTestUtils.getTestDir( + 
TestOzoneShellHA.class.getSimpleName()); + File kmsDir = new File(testDir, UUID.randomUUID().toString()); + assertTrue(kmsDir.mkdirs()); + MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); + miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build(); + miniKMS.start(); + } + protected static void startCluster(OzoneConfiguration conf) throws Exception { String path = GenericTestUtils.getTempPath( TestOzoneShellHA.class.getSimpleName()); @@ -166,12 +183,10 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { // Init HA cluster omServiceId = "om-service-test1"; numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); final int numDNs = 5; + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, + getKeyProviderURI(miniKMS)); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .setNumDatanodes(numDNs) @@ -190,9 +205,17 @@ public static void shutdown() { cluster.shutdown(); } + if (miniKMS != null) { + miniKMS.stop(); + } + if (baseDir != null) { FileUtil.fullyDelete(baseDir, true); } + + if (testDir != null) { + FileUtil.fullyDelete(testDir, true); + } } @BeforeEach @@ -249,21 +272,13 @@ private void executeWithError(OzoneShell shell, String[] args, if (Strings.isNullOrEmpty(expectedError)) { execute(shell, args); } else { - try { - execute(shell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - assertTrue(exceptionToCheck.getMessage().contains(expectedError), - String.format("Error of OzoneShell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage())); + Exception ex = assertThrows(Exception.class, () -> execute(shell, args)); + if (!Strings.isNullOrEmpty(expectedError)) { + Throwable exceptionToCheck = ex; + if (exceptionToCheck.getCause() != null) { + exceptionToCheck = exceptionToCheck.getCause(); } + assertThat(exceptionToCheck.getMessage()).contains(expectedError); } } } @@ -410,7 +425,7 @@ public void testRATISTypeECReplication() { Throwable t = assertThrows(ExecutionException.class, () -> execute(ozoneShell, args)); Throwable c = t.getCause(); - assertTrue(c instanceof IllegalArgumentException); + assertInstanceOf(IllegalArgumentException.class, c); assertEquals("rs-3-2-1024k is not supported for " + ReplicationType.RATIS + " replication type", c.getMessage()); } @@ -1155,36 +1170,36 @@ public void testShQuota() throws Exception { "--space-quota", "0GB"}; ExecutionException eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, volumeArgs1)); - assertTrue(eException.getMessage() - .contains("Invalid value for space quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for space quota"); out.reset(); String[] volumeArgs2 = new String[]{"volume", "setquota", "vol4", "--space-quota", "-1GB"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, volumeArgs2)); - assertTrue(eException.getMessage() - .contains("Invalid value for space quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for space quota"); out.reset(); String[] volumeArgs3 = new String[]{"volume", "setquota", "vol4", "--space-quota", "test"}; 
eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, volumeArgs3)); - assertTrue(eException.getMessage() + assertThat(eException.getMessage()) .contains("test is invalid. " + "The quota value should be a positive integer " + - "with byte numeration(B, KB, MB, GB and TB)")); + "with byte numeration(B, KB, MB, GB and TB)"); out.reset(); String[] volumeArgs4 = new String[]{"volume", "setquota", "vol4", "--space-quota", "1.5GB"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, volumeArgs4)); - assertTrue(eException.getMessage() + assertThat(eException.getMessage()) .contains("1.5GB is invalid. " + "The quota value should be a positive integer " + - "with byte numeration(B, KB, MB, GB and TB)")); + "with byte numeration(B, KB, MB, GB and TB)"); out.reset(); String[] volumeArgs5 = new String[]{"volume", "setquota", "vol4", @@ -1192,24 +1207,24 @@ public void testShQuota() throws Exception { MissingParameterException mException = assertThrows( MissingParameterException.class, () -> execute(ozoneShell, volumeArgs5)); - assertTrue(mException.getMessage() - .contains("Missing required parameter")); + assertThat(mException.getMessage()) + .contains("Missing required parameter"); out.reset(); String[] volumeArgs6 = new String[]{"volume", "setquota", "vol4", "--namespace-quota", "0"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, volumeArgs6)); - assertTrue(eException.getMessage() - .contains("Invalid value for namespace quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for namespace quota"); out.reset(); String[] volumeArgs7 = new String[]{"volume", "setquota", "vol4", "--namespace-quota"}; mException = assertThrows(MissingParameterException.class, () -> execute(ozoneShell, volumeArgs7)); - assertTrue(mException.getMessage() - .contains("Missing required parameter")); + assertThat(mException.getMessage()) + .contains("Missing required parameter"); out.reset(); // Test set bucket quota to invalid values @@ -1217,60 +1232,60 @@ public void testShQuota() throws Exception { "--space-quota", "0GB"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, bucketArgs1)); - assertTrue(eException.getMessage() - .contains("Invalid value for space quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for space quota"); out.reset(); String[] bucketArgs2 = new String[]{"bucket", "setquota", "vol4/buck4", "--space-quota", "-1GB"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, bucketArgs2)); - assertTrue(eException.getMessage() - .contains("Invalid value for space quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for space quota"); out.reset(); String[] bucketArgs3 = new String[]{"bucket", "setquota", "vol4/buck4", "--space-quota", "test"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, bucketArgs3)); - assertTrue(eException.getMessage() + assertThat(eException.getMessage()) .contains("test is invalid. " + "The quota value should be a positive integer " + - "with byte numeration(B, KB, MB, GB and TB)")); + "with byte numeration(B, KB, MB, GB and TB)"); out.reset(); String[] bucketArgs4 = new String[]{"bucket", "setquota", "vol4/buck4", "--space-quota", "1.5GB"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, bucketArgs4)); - assertTrue(eException.getMessage() + assertThat(eException.getMessage()) .contains("1.5GB is invalid. 
" + "The quota value should be a positive integer " + - "with byte numeration(B, KB, MB, GB and TB)")); + "with byte numeration(B, KB, MB, GB and TB)"); out.reset(); String[] bucketArgs5 = new String[]{"bucket", "setquota", "vol4/buck4", "--space-quota"}; mException = assertThrows(MissingParameterException.class, () -> execute(ozoneShell, bucketArgs5)); - assertTrue(mException.getMessage() - .contains("Missing required parameter")); + assertThat(mException.getMessage()) + .contains("Missing required parameter"); out.reset(); String[] bucketArgs6 = new String[]{"bucket", "setquota", "vol4/buck4", "--namespace-quota", "0"}; eException = assertThrows(ExecutionException.class, () -> execute(ozoneShell, bucketArgs6)); - assertTrue(eException.getMessage() - .contains("Invalid value for namespace quota")); + assertThat(eException.getMessage()) + .contains("Invalid value for namespace quota"); out.reset(); String[] bucketArgs7 = new String[]{"bucket", "setquota", "vol4/buck4", "--namespace-quota"}; mException = assertThrows(MissingParameterException.class, () -> execute(ozoneShell, bucketArgs7)); - assertTrue(mException.getMessage() - .contains("Missing required parameter")); + assertThat(mException.getMessage()) + .contains("Missing required parameter"); out.reset(); // Test set bucket spaceQuota or nameSpaceQuota to normal value. @@ -1358,7 +1373,7 @@ public void testCreateBucketWithECReplicationConfig() throws Exception { client.getObjectStore().getVolume(volumeName); OzoneBucket bucket = volume.getBucket("bucket0"); try (OzoneOutputStream out = bucket.createKey("myKey", 2000)) { - assertTrue(out.getOutputStream() instanceof ECKeyOutputStream); + assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); } } @@ -1386,6 +1401,29 @@ public void testPutKeyOnBucketWithECReplicationConfig() throws Exception { key.getReplicationConfig().getReplicationType()); } + @Test + public void testPutKeyWithECReplicationConfig() throws Exception { + final String volumeName = UUID.randomUUID().toString(); + final String bucketName = UUID.randomUUID().toString(); + final String keyName = UUID.randomUUID().toString(); + getVolume(volumeName); + String bucketPath = + Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName; + String[] args = + new String[] {"bucket", "create", bucketPath}; + execute(ozoneShell, args); + + args = new String[] {"key", "put", "-r", "rs-3-2-1024k", "-t", "EC", + bucketPath + Path.SEPARATOR + keyName, testFilePathString}; + execute(ozoneShell, args); + + OzoneKeyDetails key = + client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName).getKey(keyName); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + } + @Test public void testCreateBucketWithRatisReplicationConfig() throws Exception { final String volumeName = "volume101"; @@ -1399,7 +1437,7 @@ public void testCreateBucketWithRatisReplicationConfig() throws Exception { client.getObjectStore().getVolume(volumeName); OzoneBucket bucket = volume.getBucket("bucket1"); try (OzoneOutputStream out = bucket.createKey("myKey", 2000)) { - assertTrue(out.getOutputStream() instanceof KeyOutputStream); + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); assertFalse(out.getOutputStream() instanceof ECKeyOutputStream); } } @@ -1424,7 +1462,7 @@ public void testSetECReplicationConfigOnBucket() throws Exception { execute(ozoneShell, args); bucket = volume.getBucket("bucket0"); try (OzoneOutputStream out = bucket.createKey("newECKey", 1024)) { - 
assertTrue(out.getOutputStream() instanceof ECKeyOutputStream); + assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); } args = new String[] {"bucket", "set-replication-config", bucketPath, "-t", @@ -1436,6 +1474,33 @@ public void testSetECReplicationConfigOnBucket() throws Exception { } } + @Test + public void testSetEncryptionKey() throws Exception { + final String volumeName = "volume111"; + getVolume(volumeName); + String bucketPath = "/volume111/bucket0"; + String[] args = new String[]{"bucket", "create", bucketPath}; + execute(ozoneShell, args); + + OzoneVolume volume = + client.getObjectStore().getVolume(volumeName); + OzoneBucket bucket = volume.getBucket("bucket0"); + assertNull(bucket.getEncryptionKeyName()); + String newEncKey = "enckey1"; + + KeyProvider provider = cluster.getOzoneManager().getKmsProvider(); + KeyProvider.Options options = KeyProvider.options(cluster.getConf()); + options.setDescription(newEncKey); + options.setBitLength(128); + provider.createKey(newEncKey, options); + provider.flush(); + + args = new String[]{"bucket", "set-encryption-key", bucketPath, "-k", + newEncKey}; + execute(ozoneShell, args); + assertEquals(newEncKey, volume.getBucket("bucket0").getEncryptionKeyName()); + } + @Test public void testCreateBucketWithECReplicationConfigWithoutReplicationParam() { getVolume("volume102"); @@ -1738,7 +1803,7 @@ public void testListVolumeBucketKeyShouldPrintValidJsonArray() parseOutputIntoArrayList(); // Can include s3v and volumes from other test cases that aren't cleaned up, // hence >= instead of equals. - assertTrue(volumeListOut.size() >= testVolumes.size()); + assertThat(volumeListOut.size()).isGreaterThanOrEqualTo(testVolumes.size()); final HashSet volumeSet = new HashSet<>(testVolumes); volumeListOut.forEach(map -> volumeSet.remove(map.get("name"))); // Should have found all the volumes created for this test @@ -1792,9 +1857,9 @@ public void testClientBucketLayoutValidation() { }; ParameterException exception = assertThrows(ParameterException.class, () -> execute(ozoneShell, arg2)); - assertTrue(exception.getMessage() + assertThat(exception.getMessage()) .contains("expected one of [FILE_SYSTEM_OPTIMIZED, OBJECT_STORE, " + - "LEGACY]")); + "LEGACY]"); String[] arg3 = new String[]{ @@ -1804,9 +1869,9 @@ public void testClientBucketLayoutValidation() { exception = assertThrows(ParameterException.class, () -> execute(ozoneShell, arg3)); - assertTrue(exception.getMessage() + assertThat(exception.getMessage()) .contains("expected one of [FILE_SYSTEM_OPTIMIZED, OBJECT_STORE, " + - "LEGACY] ")); + "LEGACY] "); } @Test @@ -1839,7 +1904,7 @@ public void testListAllKeys() // Number of keys should return less than 120(100 by default) args = new String[]{"key", "list", volumeName}; execute(ozoneShell, args); - assertTrue(getNumOfKeys() < 120); + assertThat(getNumOfKeys()).isLessThan(120); out.reset(); // Use --all option to get all the keys @@ -2051,4 +2116,9 @@ public void testLinkedAndNonLinkedBucketMetaData() new String[]{"volume", "delete", "/volume1"}); out.reset(); } + + private static String getKeyProviderURI(MiniKMS kms) { + return KMSClientProvider.SCHEME_NAME + "://" + + kms.getKMSUrl().toExternalForm().replace("://", "@"); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java index 485cb2c919a..a0ad35500ca 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java @@ -39,6 +39,7 @@ public static void init() throws Exception { conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + startKMS(); startCluster(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index e542e9494d6..6abfbed2bd3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -57,7 +57,6 @@ import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; -import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; @@ -65,10 +64,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Integration test for Ozone tenant shell command. HA enabled. 
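For context on the KMS wiring added above: getKeyProviderURI() only rewrites the MiniKMS URL into a Hadoop key-provider path. A minimal sketch, with an illustrative URL (the real MiniKMS port is chosen at random):

    // Illustrative values only. If MiniKMS serves at http://localhost:9600/kms,
    // replacing "://" with "@" and prefixing the kms scheme yields
    // kms://http@localhost:9600/kms, which is the form expected in
    // hadoop.security.key.provider.path.
    String uri = KMSClientProvider.SCHEME_NAME + "://"
        + miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, uri);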
@@ -108,8 +107,6 @@ public class TestOzoneTenantShell { private static final PrintStream OLD_ERR = System.err; private static String omServiceId; - private static String clusterId; - private static String scmId; private static int numOfOMs; private static final boolean USE_ACTUAL_RANGER = false; @@ -156,11 +153,7 @@ public static void init() throws Exception { // Init cluster omServiceId = "om-service-test1"; numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .withoutDatanodes() // Remove this once we are actually writing data @@ -254,19 +247,13 @@ private void executeWithError(OzoneShell shell, String[] args, if (Strings.isNullOrEmpty(expectedError)) { execute(shell, args); } else { - try { - execute(shell, args); - fail("Exception is expected from command execution " + Arrays.asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - assertTrue(exceptionToCheck.getMessage().contains(expectedError), - String.format("Error of OzoneShell code doesn't contain the exception [%s] in [%s]", expectedError, - exceptionToCheck.getMessage())); + Exception ex = assertThrows(Exception.class, () -> execute(shell, args)); + if (!Strings.isNullOrEmpty(expectedError)) { + Throwable exceptionToCheck = ex; + if (exceptionToCheck.getCause() != null) { + exceptionToCheck = exceptionToCheck.getCause(); } + assertThat(exceptionToCheck.getMessage()).contains(expectedError); } } } @@ -358,7 +345,7 @@ private void checkOutput(String str, String stringToMatch, if (exactMatch) { assertEquals(stringToMatch, str); } else { - assertTrue(str.contains(stringToMatch), str); + assertThat(str).contains(stringToMatch); } } @@ -447,7 +434,7 @@ public void testOzoneTenantBasicOperations() throws IOException { checkOutput(err, "", true); lines = FileUtils.readLines(AUDIT_LOG_FILE, (String)null); - assertTrue(lines.size() > 0); + assertThat(lines.size()).isGreaterThan(0); checkOutput(lines.get(lines.size() - 1), "ret=SUCCESS", false); // Check volume creation diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 50742791287..427b36d9a95 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -42,8 +42,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * * Integration test for {@code ozone admin reconfig} command. HA enabled. 
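The same simplification recurs in every mini-cluster bootstrap touched by this patch: the explicit cluster and SCM IDs are dropped and the builder generates them. A minimal sketch of the resulting setup, using only calls that appear in the surrounding hunks:

    // No setClusterId()/setScmId(): the builder now owns ID generation.
    cluster = MiniOzoneCluster.newOMHABuilder(conf)
        .setOMServiceId(omServiceId)
        .setNumOfOzoneManagers(numOfOMs)
        .build();
    cluster.waitForClusterToBeReady();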
@@ -68,8 +68,6 @@ public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(UUID.randomUUID().toString()) - .setScmId(UUID.randomUUID().toString()) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .setNumOfStorageContainerManagers(1) @@ -138,7 +136,7 @@ private void assertReconfigurablePropertiesOutput( List outs = Arrays.asList(output.split(System.getProperty("line.separator"))); for (String property : except) { - assertTrue(outs.contains(property), String.format("Not found %s in output: %s", property, output)); + assertThat(outs).contains(property); } } @@ -176,8 +174,7 @@ private void executeAndAssertBulkReconfigCount(int except) "reconfig", "--in-service-datanodes", "properties"}); String output = capture.getOutput(); - assertTrue(capture.getOutput().contains(String.format("successfully %d", except)), - String.format("Excepted successfully %d. output: %s%n", except, output)); + assertThat(capture.getOutput()).contains(String.format("successfully %d", except)); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 3754bfceccd..2e1b7a78736 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.shell; import java.net.InetSocketAddress; -import java.util.UUID; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -36,8 +35,6 @@ public class TestScmAdminHA { private static OzoneConfiguration conf; private static String omServiceId; private static int numOfOMs; - private static String clusterId; - private static String scmId; private static MiniOzoneCluster cluster; @BeforeAll @@ -48,11 +45,7 @@ public static void init() throws Exception { // Init HA cluster omServiceId = "om-service-test1"; numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index 3e5377ce228..62d50708c83 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ratis.protocol.RaftPeer; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -35,7 +34,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.UUID; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static 
org.junit.jupiter.api.Assertions.assertNotSame; /** * Test transferLeadership with SCM HA setup. @@ -43,8 +46,6 @@ public class TestTransferLeadershipShell { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; - private String clusterId; - private String scmId; private String omServiceId; private String scmServiceId; private int numOfOMs = 3; @@ -60,15 +61,13 @@ public class TestTransferLeadershipShell { @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId).setScmId(scmId).setOMServiceId(omServiceId) + .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs).setNumOfActiveOMs(numOfOMs) @@ -90,7 +89,7 @@ public void shutdown() { public void testOmTransfer() throws Exception { OzoneManager oldLeader = cluster.getOMLeader(); List omList = new ArrayList<>(cluster.getOzoneManagersList()); - Assertions.assertTrue(omList.contains(oldLeader)); + assertThat(omList).contains(oldLeader); omList.remove(oldLeader); OzoneManager newLeader = omList.get(0); cluster.waitForClusterToBeReady(); @@ -98,14 +97,14 @@ public void testOmTransfer() throws Exception { String[] args1 = {"om", "transfer", "-n", newLeader.getOMNodeId()}; ozoneAdmin.execute(args1); Thread.sleep(3000); - Assertions.assertEquals(newLeader, cluster.getOMLeader()); + assertEquals(newLeader, cluster.getOMLeader()); assertOMResetPriorities(); oldLeader = cluster.getOMLeader(); String[] args3 = {"om", "transfer", "-r"}; ozoneAdmin.execute(args3); Thread.sleep(3000); - Assertions.assertNotSame(oldLeader, cluster.getOMLeader()); + assertNotSame(oldLeader, cluster.getOMLeader()); assertOMResetPriorities(); } @@ -114,7 +113,7 @@ public void testScmTransfer() throws Exception { StorageContainerManager oldLeader = getScmLeader(cluster); List scmList = new ArrayList<>(cluster. 
getStorageContainerManagersList()); - Assertions.assertTrue(scmList.contains(oldLeader)); + assertThat(scmList).contains(oldLeader); scmList.remove(oldLeader); StorageContainerManager newLeader = scmList.get(0); @@ -122,14 +121,14 @@ public void testScmTransfer() throws Exception { String[] args1 = {"scm", "transfer", "-n", newLeader.getScmId()}; ozoneAdmin.execute(args1); cluster.waitForClusterToBeReady(); - Assertions.assertEquals(newLeader, getScmLeader(cluster)); + assertEquals(newLeader, getScmLeader(cluster)); assertSCMResetPriorities(); oldLeader = getScmLeader(cluster); String[] args3 = {"scm", "transfer", "-r"}; ozoneAdmin.execute(args3); cluster.waitForClusterToBeReady(); - Assertions.assertNotSame(oldLeader, getScmLeader(cluster)); + assertNotSame(oldLeader, getScmLeader(cluster)); assertSCMResetPriorities(); } @@ -141,14 +140,14 @@ private void assertOMResetPriorities() { .getPeers(); for (RaftPeer raftPeer: raftPeers) { - Assertions.assertEquals(RatisHelper.NEUTRAL_PRIORITY, + assertEquals(RatisHelper.NEUTRAL_PRIORITY, raftPeer.getPriority()); } } private void assertSCMResetPriorities() { StorageContainerManager scm = getScmLeader(cluster); - Assertions.assertNotNull(scm); + assertNotNull(scm); Collection raftPeers = scm .getScmHAManager() .getRatisServer() @@ -156,7 +155,7 @@ private void assertSCMResetPriorities() { .getGroup() .getPeers(); for (RaftPeer raftPeer: raftPeers) { - Assertions.assertEquals(RatisHelper.NEUTRAL_PRIORITY, + assertEquals(RatisHelper.NEUTRAL_PRIORITY, raftPeer.getPriority()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 5b6047acb39..8985af2ac56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -56,7 +56,7 @@ import org.apache.hadoop.util.functional.RemoteIterators; import org.junit.jupiter.api.BeforeEach; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 4c415c0c7e0..4e79ae97fc2 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -41,6 +41,21 @@ 4 + + ozone.scm.handler.count.key + 20 + + + + ozone.om.handler.count.key + 20 + + + + dfs.container.ratis.datastream.enabled + true + + ozone.scm.close.container.wait.duration 1s @@ -63,4 +78,38 @@ ozone.scm.ha.ratis.log.appender.queue.byte-limit 4MB + + + ozone.scm.chunk.size + 1MB + + + ozone.scm.block.size + 4MB + + + ozone.client.stream.buffer.flush.size + 1MB + + + ozone.client.stream.buffer.max.size + 2MB + + + ozone.client.stream.buffer.size + 1MB + + + ozone.client.datastream.buffer.flush.size + 4MB + + + ozone.client.datastream.min.packet.size + 256KB + + + ozone.client.datastream.window.size + 8MB + + diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 191820ba4a4..9cafd9b31b8 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ 
b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -528,7 +528,7 @@ enum Status { UNAUTHORIZED = 91; S3_SECRET_ALREADY_EXISTS = 92; - + INVALID_PATH = 93; TOO_MANY_BUCKETS = 94; @@ -816,6 +816,7 @@ message BucketArgs { optional uint64 quotaInNamespace = 9; optional string ownerName = 10; optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 11; + optional BucketEncryptionInfoProto bekInfo = 12; } message PrefixInfo { @@ -1922,18 +1923,27 @@ message SnapshotMoveKeyInfos { message SnapshotPurgeRequest { repeated string snapshotDBKeys = 1; + repeated string updatedSnapshotDBKey = 2 [deprecated = true]; } message SetSnapshotPropertyRequest { - optional string snapshotKey = 1; - optional SnapshotSize snapshotSize = 2; - optional bool deepCleanedDeletedDir = 3; - optional bool deepCleanedDeletedKey = 4; + optional SnapshotProperty snapshotProperty = 1 [deprecated = true]; + optional string snapshotKey = 2; + optional SnapshotSize snapshotSize = 3; + optional bool deepCleanedDeletedDir = 4; + optional bool deepCleanedDeletedKey = 5; +} + +// SnapshotProperty in entirely deprecated, Keeping it here for proto.lock compatibility +message SnapshotProperty { + optional string snapshotKey = 1 [deprecated = true]; + optional uint64 exclusiveSize = 2 [deprecated = true]; + optional uint64 exclusiveReplicatedSize = 3 [deprecated = true]; } message SnapshotSize { - optional uint64 exclusiveSize = 2; - optional uint64 exclusiveReplicatedSize = 3; + optional uint64 exclusiveSize = 1; + optional uint64 exclusiveReplicatedSize = 2; } message DeleteTenantRequest { @@ -2105,7 +2115,8 @@ message RecoverLeaseRequest { } message RecoverLeaseResponse { - optional KeyInfo keyInfo = 1; + optional bool response = 1 [deprecated=true]; + optional KeyInfo keyInfo = 2; } message SetTimesRequest { diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index ffe53f04cbf..b8f5c395bae 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -174,6 +174,9 @@ { "name": "Type", "enum_fields": [ + { + "name": "UnknownCommand" + }, { "name": "CreateVolume", "integer": 11 @@ -445,6 +448,103 @@ { "name": "GetKeyInfo", "integer": 111 + }, + { + "name": "CreateSnapshot", + "integer": 112 + }, + { + "name": "ListSnapshot", + "integer": 113 + }, + { + "name": "SnapshotDiff", + "integer": 114 + }, + { + "name": "DeleteSnapshot", + "integer": 115 + }, + { + "name": "SnapshotMoveDeletedKeys", + "integer": 116 + }, + { + "name": "TransferLeadership", + "integer": 117 + }, + { + "name": "SnapshotPurge", + "integer": 118 + }, + { + "name": "RecoverLease", + "integer": 119 + }, + { + "name": "SetTimes", + "integer": 120 + }, + { + "name": "RefetchSecretKey", + "integer": 121 + }, + { + "name": "ListSnapshotDiffJobs", + "integer": 122 + }, + { + "name": "CancelSnapshotDiff", + "integer": 123 + }, + { + "name": "SetSafeMode", + "integer": 124 + }, + { + "name": "PrintCompactionLogDag", + "integer": 125 + }, + { + "name": "ListKeysLight", + "integer": 126 + }, + { + "name": "AbortExpiredMultiPartUploads", + "integer": 127 + }, + { + "name": "SetSnapshotProperty", + "integer": 128 + }, + { + "name": "ListStatusLight", + "integer": 129 + }, + { + "name": "GetSnapshotInfo", + "integer": 130 + } + ] + }, + { + "name": "SafeMode", + "enum_fields": [ + { + "name": "ENTER", + "integer": 1 + }, + { + "name": "LEAVE", + "integer": 2 + }, + { + "name": 
"FORCE_EXIT", + "integer": 3 + }, + { + "name": "GET", + "integer": 4 } ] }, @@ -782,6 +882,38 @@ { "name": "FEATURE_NOT_ENABLED", "integer": 86 + }, + { + "name": "INVALID_SNAPSHOT_ERROR", + "integer": 87 + }, + { + "name": "CONTAINS_SNAPSHOT", + "integer": 88 + }, + { + "name": "SSL_CONNECTION_FAILURE", + "integer": 89 + }, + { + "name": "RENAME_OPEN_FILE", + "integer": 90 + }, + { + "name": "UNAUTHORIZED", + "integer": 91 + }, + { + "name": "S3_SECRET_ALREADY_EXISTS", + "integer": 92 + }, + { + "name": "INVALID_PATH", + "integer": 93 + }, + { + "name": "TOO_MANY_BUCKETS", + "integer": 94 } ] }, @@ -866,6 +998,19 @@ } ] }, + { + "name": "SnapshotStatusProto", + "enum_fields": [ + { + "name": "SNAPSHOT_ACTIVE", + "integer": 1 + }, + { + "name": "SNAPSHOT_DELETED", + "integer": 2 + } + ] + }, { "name": "OzoneObj.ObjectType", "enum_fields": [ @@ -1016,6 +1161,56 @@ "integer": 4 } ] + }, + { + "name": "SnapshotDiffResponse.JobStatusProto", + "enum_fields": [ + { + "name": "QUEUED", + "integer": 1 + }, + { + "name": "IN_PROGRESS", + "integer": 2 + }, + { + "name": "DONE", + "integer": 3 + }, + { + "name": "REJECTED", + "integer": 4 + }, + { + "name": "FAILED", + "integer": 5 + }, + { + "name": "CANCELLED", + "integer": 6 + } + ] + }, + { + "name": "DiffReportEntryProto.DiffTypeProto", + "enum_fields": [ + { + "name": "CREATE", + "integer": 1 + }, + { + "name": "MODIFY", + "integer": 2 + }, + { + "name": "DELETE", + "integer": 3 + }, + { + "name": "RENAME", + "integer": 4 + } + ] } ], "messages": [ @@ -1495,6 +1690,108 @@ "name": "GetKeyInfoRequest", "type": "GetKeyInfoRequest", "optional": true + }, + { + "id": 112, + "name": "CreateSnapshotRequest", + "type": "CreateSnapshotRequest", + "optional": true + }, + { + "id": 113, + "name": "ListSnapshotRequest", + "type": "ListSnapshotRequest", + "optional": true + }, + { + "id": 114, + "name": "snapshotDiffRequest", + "type": "SnapshotDiffRequest", + "optional": true + }, + { + "id": 115, + "name": "DeleteSnapshotRequest", + "type": "DeleteSnapshotRequest", + "optional": true + }, + { + "id": 116, + "name": "SnapshotMoveDeletedKeysRequest", + "type": "SnapshotMoveDeletedKeysRequest", + "optional": true + }, + { + "id": 117, + "name": "TransferOmLeadershipRequest", + "type": "hdds.TransferLeadershipRequestProto", + "optional": true + }, + { + "id": 118, + "name": "SnapshotPurgeRequest", + "type": "SnapshotPurgeRequest", + "optional": true + }, + { + "id": 119, + "name": "RecoverLeaseRequest", + "type": "RecoverLeaseRequest", + "optional": true + }, + { + "id": 120, + "name": "SetTimesRequest", + "type": "SetTimesRequest", + "optional": true + }, + { + "id": 121, + "name": "RefetchSecretKeyRequest", + "type": "RefetchSecretKeyRequest", + "optional": true + }, + { + "id": 122, + "name": "ListSnapshotDiffJobRequest", + "type": "ListSnapshotDiffJobRequest", + "optional": true + }, + { + "id": 123, + "name": "CancelSnapshotDiffRequest", + "type": "CancelSnapshotDiffRequest", + "optional": true + }, + { + "id": 124, + "name": "SetSafeModeRequest", + "type": "SetSafeModeRequest", + "optional": true + }, + { + "id": 125, + "name": "PrintCompactionLogDagRequest", + "type": "PrintCompactionLogDagRequest", + "optional": true + }, + { + "id": 126, + "name": "multipartUploadsExpiredAbortRequest", + "type": "MultipartUploadsExpiredAbortRequest", + "optional": true + }, + { + "id": 127, + "name": "SetSnapshotPropertyRequest", + "type": "SetSnapshotPropertyRequest", + "optional": true + }, + { + "id": 128, + "name": "SnapshotInfoRequest", + "type": 
"SnapshotInfoRequest", + "optional": true } ] }, @@ -1944,6 +2241,126 @@ "name": "GetKeyInfoResponse", "type": "GetKeyInfoResponse", "optional": true + }, + { + "id": 112, + "name": "CreateSnapshotResponse", + "type": "CreateSnapshotResponse", + "optional": true + }, + { + "id": 113, + "name": "ListSnapshotResponse", + "type": "ListSnapshotResponse", + "optional": true + }, + { + "id": 114, + "name": "snapshotDiffResponse", + "type": "SnapshotDiffResponse", + "optional": true + }, + { + "id": 115, + "name": "DeleteSnapshotResponse", + "type": "DeleteSnapshotResponse", + "optional": true + }, + { + "id": 116, + "name": "SnapshotMoveDeletedKeysResponse", + "type": "SnapshotMoveDeletedKeysResponse", + "optional": true + }, + { + "id": 117, + "name": "TransferOmLeadershipResponse", + "type": "hdds.TransferLeadershipResponseProto", + "optional": true + }, + { + "id": 118, + "name": "SnapshotPurgeResponse", + "type": "SnapshotPurgeResponse", + "optional": true + }, + { + "id": 119, + "name": "RecoverLeaseResponse", + "type": "RecoverLeaseResponse", + "optional": true + }, + { + "id": 120, + "name": "SetTimesResponse", + "type": "SetTimesResponse", + "optional": true + }, + { + "id": 121, + "name": "RefetchSecretKeyResponse", + "type": "RefetchSecretKeyResponse", + "optional": true + }, + { + "id": 122, + "name": "ListSnapshotDiffJobResponse", + "type": "ListSnapshotDiffJobResponse", + "optional": true + }, + { + "id": 123, + "name": "cancelSnapshotDiffResponse", + "type": "CancelSnapshotDiffResponse", + "optional": true + }, + { + "id": 124, + "name": "SetSafeModeResponse", + "type": "SetSafeModeResponse", + "optional": true + }, + { + "id": 125, + "name": "PrintCompactionLogDagResponse", + "type": "PrintCompactionLogDagResponse", + "optional": true + }, + { + "id": 126, + "name": "listKeysLightResponse", + "type": "ListKeysLightResponse", + "optional": true + }, + { + "id": 127, + "name": "multipartUploadsExpiredAbortResponse", + "type": "MultipartUploadsExpiredAbortResponse", + "optional": true + }, + { + "id": 128, + "name": "SetSnapshotPropertyResponse", + "type": "SetSnapshotPropertyResponse", + "optional": true + }, + { + "id": 129, + "name": "listStatusLightResponse", + "type": "ListStatusLightResponse", + "optional": true + }, + { + "id": 130, + "name": "SnapshotInfoResponse", + "type": "SnapshotInfoResponse", + "optional": true + }, + { + "id": 131, + "name": "omLockDetails", + "type": "OMLockDetailsProto", + "optional": true } ] }, @@ -2262,6 +2679,20 @@ } ] }, + { + "name": "RefetchSecretKeyRequest" + }, + { + "name": "RefetchSecretKeyResponse", + "fields": [ + { + "id": 1, + "name": "id", + "type": "hdds.UUID", + "optional": true + } + ] + }, { "name": "CheckVolumeAccessRequest", "fields": [ @@ -2730,22 +3161,206 @@ ] }, { - "name": "OzoneObj", + "name": "SnapshotInfo", "fields": [ { "id": 1, - "name": "resType", - "type": "ObjectType", - "required": true + "name": "snapshotID", + "type": "hadoop.hdds.UUID", + "optional": true }, { "id": 2, - "name": "storeType", - "type": "StoreType", - "required": true, - "options": [ - { - "name": "default", + "name": "name", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "volumeName", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 5, + "name": "snapshotStatus", + "type": "SnapshotStatusProto", + "optional": true + }, + { + "id": 6, + "name": "creationTime", + "type": "uint64", + "optional": true + }, + { + "id": 7, + "name": 
"deletionTime", + "type": "uint64", + "optional": true + }, + { + "id": 8, + "name": "pathPreviousSnapshotID", + "type": "hadoop.hdds.UUID", + "optional": true + }, + { + "id": 9, + "name": "globalPreviousSnapshotID", + "type": "hadoop.hdds.UUID", + "optional": true + }, + { + "id": 10, + "name": "snapshotPath", + "type": "string", + "optional": true + }, + { + "id": 11, + "name": "checkpointDir", + "type": "string", + "optional": true + }, + { + "id": 12, + "name": "dbTxSequenceNumber", + "type": "int64", + "optional": true + }, + { + "id": 13, + "name": "deepClean", + "type": "bool", + "optional": true + }, + { + "id": 14, + "name": "sstFiltered", + "type": "bool", + "optional": true + }, + { + "id": 15, + "name": "referencedSize", + "type": "uint64", + "optional": true + }, + { + "id": 16, + "name": "referencedReplicatedSize", + "type": "uint64", + "optional": true + }, + { + "id": 17, + "name": "exclusiveSize", + "type": "uint64", + "optional": true + }, + { + "id": 18, + "name": "exclusiveReplicatedSize", + "type": "uint64", + "optional": true + } + ] + }, + { + "name": "SnapshotDiffJobProto", + "fields": [ + { + "id": 1, + "name": "creationTime", + "type": "uint64", + "optional": true + }, + { + "id": 2, + "name": "jobId", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "status", + "type": "SnapshotDiffResponse.JobStatusProto", + "optional": true + }, + { + "id": 4, + "name": "volume", + "type": "string", + "optional": true + }, + { + "id": 5, + "name": "bucket", + "type": "string", + "optional": true + }, + { + "id": 6, + "name": "fromSnapshot", + "type": "string", + "optional": true + }, + { + "id": 7, + "name": "toSnapshot", + "type": "string", + "optional": true + }, + { + "id": 8, + "name": "forceFullDiff", + "type": "bool", + "optional": true + }, + { + "id": 9, + "name": "totalDiffEntries", + "type": "uint64", + "optional": true + }, + { + "id": 10, + "name": "message", + "type": "string", + "optional": true + }, + { + "id": 11, + "name": "disableNativeDiff", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "OzoneObj", + "fields": [ + { + "id": 1, + "name": "resType", + "type": "ObjectType", + "required": true + }, + { + "id": 2, + "name": "storeType", + "type": "StoreType", + "required": true, + "options": [ + { + "name": "default", "value": "S3" } ] @@ -3033,6 +3648,12 @@ "name": "count", "type": "int32", "optional": true + }, + { + "id": 5, + "name": "hasSnapshot", + "type": "bool", + "optional": true } ] }, @@ -3463,6 +4084,59 @@ "name": "fileChecksum", "type": "FileChecksumProto", "optional": true + }, + { + "id": 19, + "name": "isFile", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "BasicKeyInfo", + "fields": [ + { + "id": 1, + "name": "keyName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "dataSize", + "type": "uint64", + "optional": true + }, + { + "id": 3, + "name": "creationTime", + "type": "uint64", + "optional": true + }, + { + "id": 4, + "name": "modificationTime", + "type": "uint64", + "optional": true + }, + { + "id": 5, + "name": "type", + "type": "hadoop.hdds.ReplicationType", + "optional": true + }, + { + "id": 6, + "name": "factor", + "type": "hadoop.hdds.ReplicationFactor", + "optional": true + }, + { + "id": 7, + "name": "ecReplicationConfig", + "type": "hadoop.hdds.ECReplicationConfig", + "optional": true } ] }, @@ -3553,6 +4227,41 @@ } ] }, + { + "name": "OzoneFileStatusProtoLight", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string", + "optional": true + 
}, + { + "id": 2, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "basicKeyInfo", + "type": "BasicKeyInfo", + "optional": true + }, + { + "id": 4, + "name": "blockSize", + "type": "uint64", + "optional": true + }, + { + "id": 5, + "name": "isDirectory", + "type": "bool", + "optional": true + } + ] + }, { "name": "GetFileStatusRequest", "fields": [ @@ -3576,11 +4285,22 @@ ] }, { - "name": "CreateDirectoryRequest", + "name": "GetFileStatusLightResponse", "fields": [ { "id": 1, - "name": "keyArgs", + "name": "status", + "type": "OzoneFileStatusProtoLight", + "required": true + } + ] + }, + { + "name": "CreateDirectoryRequest", + "fields": [ + { + "id": 1, + "name": "keyArgs", "type": "KeyArgs", "required": true } @@ -3709,6 +4429,17 @@ } ] }, + { + "name": "ListStatusLightResponse", + "fields": [ + { + "id": 1, + "name": "statuses", + "type": "OzoneFileStatusProtoLight", + "is_repeated": true + } + ] + }, { "name": "CreateKeyRequest", "fields": [ @@ -4027,6 +4758,18 @@ "name": "deletedKeys", "type": "DeletedKeys", "is_repeated": true + }, + { + "id": 2, + "name": "snapshotTableKey", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "keysToUpdate", + "type": "SnapshotMoveKeyInfos", + "is_repeated": true } ] }, @@ -4067,6 +4810,12 @@ "name": "deletedPath", "type": "PurgePathRequest", "is_repeated": true + }, + { + "id": 2, + "name": "snapshotTableKey", + "type": "string", + "optional": true } ] }, @@ -4338,6 +5087,29 @@ "name": "keyInfo", "type": "KeyInfo", "is_repeated": true + }, + { + "id": 3, + "name": "isTruncated", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "ListKeysLightResponse", + "fields": [ + { + "id": 1, + "name": "basicKeyInfo", + "type": "BasicKeyInfo", + "is_repeated": true + }, + { + "id": 2, + "name": "isTruncated", + "type": "bool", + "optional": true } ] }, @@ -4355,6 +5127,12 @@ "name": "clientID", "type": "uint64", "required": true + }, + { + "id": 3, + "name": "hsync", + "type": "bool", + "optional": true } ] }, @@ -4464,6 +5242,12 @@ "name": "latestSequenceNumber", "type": "uint64", "optional": true + }, + { + "id": 4, + "name": "dbUpdateSuccess", + "type": "bool", + "optional": true } ] }, @@ -4932,6 +5716,54 @@ { "name": "MultipartUploadAbortResponse" }, + { + "name": "MultipartUploadsExpiredAbortRequest", + "fields": [ + { + "id": 1, + "name": "expiredMultipartUploadsPerBucket", + "type": "ExpiredMultipartUploadsBucket", + "is_repeated": true + } + ] + }, + { + "name": "ExpiredMultipartUploadsBucket", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string", + "required": true + }, + { + "id": 2, + "name": "bucketName", + "type": "string", + "required": true + }, + { + "id": 3, + "name": "multipartUploads", + "type": "ExpiredMultipartUploadInfo", + "is_repeated": true + } + ] + }, + { + "name": "ExpiredMultipartUploadInfo", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string", + "required": true + } + ] + }, + { + "name": "MultipartUploadsExpiredAbortResponse" + }, { "name": "MultipartUploadListPartsRequest", "fields": [ @@ -5150,6 +5982,18 @@ "name": "payloadSizeResp", "type": "int32", "optional": true + }, + { + "id": 3, + "name": "readOnly", + "type": "bool", + "optional": true, + "options": [ + { + "name": "default", + "value": "true" + } + ] } ] }, @@ -5492,6 +6336,12 @@ "name": "adminRoleName", "type": "string", "optional": true + }, + { + "id": 5, + "name": "forceCreationWhenVolumeExists", + "type": "bool", + "optional": true } ] }, @@ -5507,107 
+6357,188 @@ ] }, { - "name": "DeleteTenantRequest", + "name": "CreateSnapshotRequest", "fields": [ { "id": 1, - "name": "tenantId", + "name": "volumeName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "snapshotName", "type": "string", "optional": true + }, + { + "id": 4, + "name": "snapshotId", + "type": "hadoop.hdds.UUID", + "optional": true + }, + { + "id": 5, + "name": "creationTime", + "type": "uint64", + "optional": true } ] }, { - "name": "TenantAssignUserAccessIdRequest", + "name": "ListSnapshotRequest", "fields": [ { "id": 1, - "name": "userPrincipal", + "name": "volumeName", "type": "string", "optional": true }, { "id": 2, - "name": "tenantId", + "name": "bucketName", "type": "string", "optional": true }, { "id": 3, - "name": "accessId", + "name": "prefix", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "prevSnapshot", "type": "string", "optional": true + }, + { + "id": 5, + "name": "maxListResult", + "type": "uint32", + "optional": true } ] }, { - "name": "TenantRevokeUserAccessIdRequest", + "name": "SnapshotDiffRequest", "fields": [ { "id": 1, - "name": "accessId", + "name": "volumeName", "type": "string", "optional": true }, { "id": 2, - "name": "tenantId", + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "fromSnapshot", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "toSnapshot", "type": "string", "optional": true + }, + { + "id": 5, + "name": "token", + "type": "string", + "optional": true + }, + { + "id": 6, + "name": "pageSize", + "type": "uint32", + "optional": true + }, + { + "id": 7, + "name": "forceFullDiff", + "type": "bool", + "optional": true + }, + { + "id": 8, + "name": "disableNativeDiff", + "type": "bool", + "optional": true } ] }, { - "name": "TenantAssignAdminRequest", + "name": "CancelSnapshotDiffRequest", "fields": [ { "id": 1, - "name": "accessId", + "name": "volumeName", "type": "string", "optional": true }, { "id": 2, - "name": "tenantId", + "name": "bucketName", "type": "string", "optional": true }, { "id": 3, - "name": "delegated", - "type": "bool", + "name": "fromSnapshot", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "toSnapshot", + "type": "string", "optional": true } ] }, { - "name": "TenantRevokeAdminRequest", + "name": "ListSnapshotDiffJobRequest", "fields": [ { "id": 1, - "name": "accessId", + "name": "volumeName", "type": "string", - "optional": true + "required": true }, { "id": 2, - "name": "tenantId", + "name": "bucketName", + "type": "string", + "required": true + }, + { + "id": 3, + "name": "jobStatus", "type": "string", "optional": true + }, + { + "id": 4, + "name": "listAll", + "type": "bool", + "optional": true } ] }, { - "name": "GetS3VolumeContextRequest" - }, - { - "name": "CreateTenantResponse" - }, - { - "name": "SetRangerServiceVersionResponse" - }, - { - "name": "DeleteTenantResponse", + "name": "DeleteSnapshotRequest", "fields": [ { "id": 1, @@ -5617,19 +6548,462 @@ }, { "id": 2, - "name": "volRefCount", - "type": "int64", + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "snapshotName", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "deletionTime", + "type": "uint64", "optional": true } ] }, { - "name": "TenantAssignUserAccessIdResponse", + "name": "SnapshotInfoRequest", "fields": [ { "id": 1, - "name": "s3Secret", - "type": "S3Secret", + "name": 
"volumeName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "snapshotName", + "type": "string", + "optional": true + } + ] + }, + { + "name": "PrintCompactionLogDagRequest", + "fields": [ + { + "id": 1, + "name": "fileNamePrefix", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "graphType", + "type": "string", + "optional": true + } + ] + }, + { + "name": "SnapshotMoveDeletedKeysRequest", + "fields": [ + { + "id": 1, + "name": "fromSnapshot", + "type": "SnapshotInfo", + "optional": true + }, + { + "id": 2, + "name": "nextDBKeys", + "type": "SnapshotMoveKeyInfos", + "is_repeated": true + }, + { + "id": 3, + "name": "reclaimKeys", + "type": "SnapshotMoveKeyInfos", + "is_repeated": true + }, + { + "id": 4, + "name": "renamedKeys", + "type": "hadoop.hdds.KeyValue", + "is_repeated": true + }, + { + "id": 5, + "name": "deletedDirsToMove", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "SnapshotMoveKeyInfos", + "fields": [ + { + "id": 1, + "name": "key", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "keyInfos", + "type": "KeyInfo", + "is_repeated": true + } + ] + }, + { + "name": "SnapshotPurgeRequest", + "fields": [ + { + "id": 1, + "name": "snapshotDBKeys", + "type": "string", + "is_repeated": true + }, + { + "id": 2, + "name": "updatedSnapshotDBKey", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "SetSnapshotPropertyRequest", + "fields": [ + { + "id": 1, + "name": "snapshotProperty", + "type": "SnapshotProperty", + "optional": true + } + ] + }, + { + "name": "SnapshotProperty", + "fields": [ + { + "id": 1, + "name": "snapshotKey", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "exclusiveSize", + "type": "uint64", + "optional": true + }, + { + "id": 3, + "name": "exclusiveReplicatedSize", + "type": "uint64", + "optional": true + } + ] + }, + { + "name": "DeleteTenantRequest", + "fields": [ + { + "id": 1, + "name": "tenantId", + "type": "string", + "optional": true + } + ] + }, + { + "name": "TenantAssignUserAccessIdRequest", + "fields": [ + { + "id": 1, + "name": "userPrincipal", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "tenantId", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "accessId", + "type": "string", + "optional": true + } + ] + }, + { + "name": "TenantRevokeUserAccessIdRequest", + "fields": [ + { + "id": 1, + "name": "accessId", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "tenantId", + "type": "string", + "optional": true + } + ] + }, + { + "name": "TenantAssignAdminRequest", + "fields": [ + { + "id": 1, + "name": "accessId", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "tenantId", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "delegated", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "TenantRevokeAdminRequest", + "fields": [ + { + "id": 1, + "name": "accessId", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "tenantId", + "type": "string", + "optional": true + } + ] + }, + { + "name": "GetS3VolumeContextRequest" + }, + { + "name": "CreateTenantResponse" + }, + { + "name": "SetRangerServiceVersionResponse" + }, + { + "name": "CreateSnapshotResponse", + "fields": [ + { + "id": 1, + "name": "snapshotInfo", + "type": "SnapshotInfo", + "optional": true + } + ] + }, + { + "name": "ListSnapshotResponse", + "fields": [ + 
{ + "id": 1, + "name": "snapshotInfo", + "type": "SnapshotInfo", + "is_repeated": true + } + ] + }, + { + "name": "SnapshotDiffResponse", + "fields": [ + { + "id": 1, + "name": "snapshotDiffReport", + "type": "SnapshotDiffReportProto", + "optional": true + }, + { + "id": 2, + "name": "jobStatus", + "type": "JobStatusProto", + "optional": true + }, + { + "id": 3, + "name": "waitTimeInMs", + "type": "int64", + "optional": true + }, + { + "id": 4, + "name": "reason", + "type": "string", + "optional": true + } + ] + }, + { + "name": "CancelSnapshotDiffResponse", + "fields": [ + { + "id": 1, + "name": "reason", + "type": "string", + "optional": true + } + ] + }, + { + "name": "ListSnapshotDiffJobResponse", + "fields": [ + { + "id": 1, + "name": "snapshotDiffJob", + "type": "SnapshotDiffJobProto", + "is_repeated": true + } + ] + }, + { + "name": "DeleteSnapshotResponse" + }, + { + "name": "SnapshotInfoResponse", + "fields": [ + { + "id": 1, + "name": "snapshotInfo", + "type": "SnapshotInfo", + "optional": true + } + ] + }, + { + "name": "PrintCompactionLogDagResponse", + "fields": [ + { + "id": 1, + "name": "message", + "type": "string", + "optional": true + } + ] + }, + { + "name": "SnapshotMoveDeletedKeysResponse" + }, + { + "name": "SnapshotPurgeResponse" + }, + { + "name": "SetSnapshotPropertyResponse" + }, + { + "name": "SnapshotDiffReportProto", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "fromSnapshot", + "type": "string", + "optional": true + }, + { + "id": 4, + "name": "toSnapshot", + "type": "string", + "optional": true + }, + { + "id": 5, + "name": "diffList", + "type": "DiffReportEntryProto", + "is_repeated": true + }, + { + "id": 6, + "name": "token", + "type": "string", + "optional": true + } + ] + }, + { + "name": "DiffReportEntryProto", + "fields": [ + { + "id": 1, + "name": "diffType", + "type": "DiffTypeProto", + "optional": true + }, + { + "id": 2, + "name": "sourcePath", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "targetPath", + "type": "string", + "optional": true + } + ] + }, + { + "name": "DeleteTenantResponse", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "volRefCount", + "type": "int64", + "optional": true + } + ] + }, + { + "name": "TenantAssignUserAccessIdResponse", + "fields": [ + { + "id": 1, + "name": "s3Secret", + "type": "S3Secret", "optional": true } ] @@ -5699,6 +7073,117 @@ "optional": true } ] + }, + { + "name": "RecoverLeaseRequest", + "fields": [ + { + "id": 1, + "name": "volumeName", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "bucketName", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "keyName", + "type": "string", + "optional": true + } + ] + }, + { + "name": "RecoverLeaseResponse", + "fields": [ + { + "id": 1, + "name": "response", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "SetTimesRequest", + "fields": [ + { + "id": 1, + "name": "keyArgs", + "type": "KeyArgs", + "required": true + }, + { + "id": 2, + "name": "mtime", + "type": "uint64", + "required": true + }, + { + "id": 3, + "name": "atime", + "type": "uint64", + "required": true + } + ] + }, + { + "name": "SetTimesResponse" + }, + { + "name": "SetSafeModeRequest", + "fields": [ + { + "id": 1, + "name": "safeMode", + "type": "SafeMode", + "required": true + } + ] + }, + { + 
"name": "SetSafeModeResponse", + "fields": [ + { + "id": 1, + "name": "response", + "type": "bool", + "optional": true + } + ] + }, + { + "name": "OMLockDetailsProto", + "fields": [ + { + "id": 1, + "name": "isLockAcquired", + "type": "bool", + "optional": true + }, + { + "id": 2, + "name": "waitLockNanos", + "type": "uint64", + "optional": true + }, + { + "id": 3, + "name": "readLockNanos", + "type": "uint64", + "optional": true + }, + { + "id": 4, + "name": "writeLockNanos", + "type": "uint64", + "optional": true + } + ] } ], "services": [ diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index 3ab535852ce..8ec4e0d9940 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -28,7 +28,6 @@ Apache Ozone Storage Interface jar - false diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 85af6e538d1..51e0808b17d 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -451,6 +451,19 @@ List getExpiredMultipartUploads( String getMultipartKey(String volume, String bucket, String key, String uploadId); + /** + * Returns the DB key name of a multipart upload key in OM metadata store + * for FSO-enabled buckets. + * + * @param volume - volume name + * @param bucket - bucket name + * @param key - key name + * @param uploadId - the upload id for this key + * @return bytes of DB key. + */ + String getMultipartKeyFSO(String volume, String bucket, String key, String + uploadId) throws IOException; + /** * Gets the multipart info table which holds the information about diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 4ec5b952703..4cc76868f74 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -164,7 +164,9 @@ public OmPrefixInfo build() { public PersistedPrefixInfo getProtobuf() { PersistedPrefixInfo.Builder pib = PersistedPrefixInfo.newBuilder().setName(name) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .setObjectID(objectID) + .setUpdateID(updateID); if (acls != null) { pib.addAllAcls(OzoneAclStorageUtil.toProtobuf(acls)); } @@ -186,6 +188,14 @@ public static OmPrefixInfo getFromProtobuf(PersistedPrefixInfo prefixInfo) { if (prefixInfo.getAclsList() != null) { opib.setAcls(OzoneAclStorageUtil.fromProtobuf(prefixInfo.getAclsList())); } + + if (prefixInfo.hasObjectID()) { + opib.setObjectID(prefixInfo.getObjectID()); + } + + if (prefixInfo.hasUpdateID()) { + opib.setUpdateID(prefixInfo.getUpdateID()); + } return opib.build(); } @@ -200,12 +210,25 @@ public boolean equals(Object o) { OmPrefixInfo that = (OmPrefixInfo) o; return name.equals(that.name) && Objects.equals(acls, that.acls) && - Objects.equals(metadata, that.metadata); + Objects.equals(metadata, that.metadata) && + objectID == that.objectID && + updateID == that.updateID; } @Override public int hashCode() { - return Objects.hash(name); + return 
Objects.hash(name, acls, metadata, objectID, updateID); + } + + @Override + public String toString() { + return "OmPrefixInfo{" + + "name='" + name + '\'' + + ", acls=" + acls + + ", metadata=" + metadata + + ", objectID=" + objectID + + ", updateID=" + updateID + + '}'; } /** diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java index 31846c44a7f..fc209624d85 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java @@ -23,13 +23,14 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test {@link OmMultipartKeyInfo#getCodec()}. @@ -58,7 +59,7 @@ public void testOmMultipartKeyInfoCodec() { } catch (java.io.IOException e) { e.printStackTrace(); } - Assertions.assertNotNull(data); + assertNotNull(data); OmMultipartKeyInfo multipartKeyInfo = null; try { @@ -66,7 +67,7 @@ public void testOmMultipartKeyInfoCodec() { } catch (java.io.IOException e) { e.printStackTrace(); } - Assertions.assertEquals(omMultipartKeyInfo, multipartKeyInfo); + assertEquals(omMultipartKeyInfo, multipartKeyInfo); // When random byte data passed returns null. try { diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 914697b3a62..5226f315c8b 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -23,14 +23,14 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Collections; import java.util.HashMap; - import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * Class to test OmPrefixInfo. @@ -91,7 +91,7 @@ public void testCopyObject() { ACCESS); OmPrefixInfo clonePrefixInfo = omPrefixInfo.copyObject(); - Assertions.assertEquals(omPrefixInfo, clonePrefixInfo); + assertEquals(omPrefixInfo, clonePrefixInfo); // Change acls and check. 
@@ -99,7 +99,7 @@ public void testCopyObject() { IAccessAuthorizer.ACLIdentityType.USER, username, IAccessAuthorizer.ACLType.READ, ACCESS)); - Assertions.assertNotEquals(omPrefixInfo, clonePrefixInfo); + assertNotEquals(omPrefixInfo, clonePrefixInfo); } @@ -116,10 +116,10 @@ public void testgetFromProtobufOneMetadataOneAcl() { OmPrefixInfo ompri = OmPrefixInfo.getFromProtobuf(prefixInfo); - Assertions.assertEquals(prefixInfoPath, ompri.getName()); - Assertions.assertEquals(1, ompri.getMetadata().size()); - Assertions.assertEquals(metaval, ompri.getMetadata().get(metakey)); - Assertions.assertEquals(1, ompri.getAcls().size()); + assertEquals(prefixInfoPath, ompri.getName()); + assertEquals(1, ompri.getMetadata().size()); + assertEquals(metaval, ompri.getMetadata().get(metakey)); + assertEquals(1, ompri.getAcls().size()); } @Test @@ -133,8 +133,8 @@ public void testGetProtobuf() { omPrefixInfo.getMetadata().put("key", "value"); OzoneManagerStorageProtos.PersistedPrefixInfo pi = omPrefixInfo.getProtobuf(); - Assertions.assertEquals(testPath, pi.getName()); - Assertions.assertEquals(1, pi.getAclsCount()); - Assertions.assertEquals(1, pi.getMetadataCount()); + assertEquals(testPath, pi.getName()); + assertEquals(1, pi.getAclsCount()); + assertEquals(1, pi.getMetadataCount()); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java index 6015491468c..f3ad1d8c762 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java @@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -30,6 +29,7 @@ import java.util.List; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test {@link OmPrefixInfo#getCodec()}. @@ -57,6 +57,6 @@ public void testToAndFromPersistedFormat() throws IOException { OmPrefixInfo opiLoad = codec.fromPersistedFormat( codec.toPersistedFormat(opiSave)); - Assertions.assertEquals(opiSave, opiLoad, "Loaded not equals to saved"); + assertEquals(opiSave, opiLoad, "Loaded not equals to saved"); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java index b2602f7bb0a..efe7df3dec6 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java @@ -39,7 +39,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.fail; /** * Test {@link RepeatedOmKeyInfo#getCodec(boolean)}. 
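The codec test changes that follow strip the try/catch-and-fail scaffolding; what remains is a plain round-trip check. A minimal sketch using the codec API as it appears in these hunks (variable names illustrative):

    // Serialize, deserialize, and compare against the original value;
    // any IOException now simply propagates and fails the test.
    byte[] raw = codec.toPersistedFormat(original);
    assertEquals(original, codec.fromPersistedFormat(raw));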
@@ -87,7 +86,7 @@ private OmKeyInfo getKeyInfo(int chunkNum) { } @Test - public void test() throws InterruptedException { + void test() throws Exception { threadSafety(); testWithoutPipeline(1); testWithoutPipeline(2); @@ -95,39 +94,28 @@ public void test() throws InterruptedException { testCompatibility(2); } - public void testWithoutPipeline(int chunkNum) { + public void testWithoutPipeline(int chunkNum) throws IOException { final Codec codec = RepeatedOmKeyInfo.getCodec(true); OmKeyInfo originKey = getKeyInfo(chunkNum); RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); - try { - byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo); - RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData); - System.out.println("Chunk number = " + chunkNum + - ", Serialized key size without pipeline = " + rawData.length); - assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() - .getLocationList().get(0).getPipeline()); - } catch (IOException e) { - fail("Should success"); - } + + byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData); + assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); } - public void testCompatibility(int chunkNum) { + public void testCompatibility(int chunkNum) throws IOException { final Codec codecWithoutPipeline = RepeatedOmKeyInfo.getCodec(true); final Codec codecWithPipeline = RepeatedOmKeyInfo.getCodec(false); OmKeyInfo originKey = getKeyInfo(chunkNum); RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey); - try { - byte[] rawData = codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo); - RepeatedOmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); - System.out.println("Chunk number = " + chunkNum + - ", Serialized key size with pipeline = " + rawData.length); - assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() - .getLocationList().get(0).getPipeline()); - } catch (IOException e) { - fail("Should success"); - } + byte[] rawData = codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo); + RepeatedOmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData); + assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations() + .getLocationList().get(0).getPipeline()); } public void threadSafety() throws InterruptedException { diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java index 19cb6861971..0f3af5f3a3b 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java @@ -22,9 +22,12 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + + /** * Test {@link S3SecretValue#getCodec()}. 
*/ @@ -40,14 +43,14 @@ public void testCodecWithCorrectData() throws Exception { final Codec codec = getCodec(); S3SecretValue s3SecretValue = - new S3SecretValue(UUID.randomUUID().toString(), + S3SecretValue.of(UUID.randomUUID().toString(), UUID.randomUUID().toString()); byte[] data = codec.toPersistedFormat(s3SecretValue); - Assertions.assertNotNull(data); + assertNotNull(data); S3SecretValue docdedS3Secret = codec.fromPersistedFormat(data); - Assertions.assertEquals(s3SecretValue, docdedS3Secret); + assertEquals(s3SecretValue, docdedS3Secret); } } diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 8438dbbf2c4..d076e12932d 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false @@ -88,7 +87,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on io.netty diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java new file mode 100644 index 00000000000..7cfad3b8a33 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES; + +/** + * Utility class for common bucket encryption key operations. + */ +public final class BekInfoUtils { + + private BekInfoUtils() { + } + + public static BucketEncryptionInfoProto getBekInfo( + KeyProviderCryptoExtension kmsProvider, BucketEncryptionInfoProto bek) + throws IOException { + BucketEncryptionInfoProto.Builder bekb = null; + if (kmsProvider == null) { + throw new OMException("Invalid KMS provider, check configuration " + + CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, + OMException.ResultCodes.INVALID_KMS_PROVIDER); + } + if (bek.getKeyName() == null) { + throw new OMException("Bucket encryption key needed.", OMException + .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // Talk to KMS to retrieve the bucket encryption key info. + KeyProvider.Metadata metadata = kmsProvider.getMetadata( + bek.getKeyName()); + if (metadata == null) { + throw new OMException("Bucket encryption key " + bek.getKeyName() + + " doesn't exist.", + OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // If the provider supports pool for EDEKs, this will fill in the pool + kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); + bekb = BucketEncryptionInfoProto.newBuilder() + .setKeyName(bek.getKeyName()) + .setCryptoProtocolVersion(ENCRYPTION_ZONES) + .setSuite(OMPBHelper.convert( + CipherSuite.convert(metadata.getCipher()))); + return bekb.build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 407f26b7a09..590fe9ef272 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -151,7 +151,7 @@ import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.util.Time.monotonicNow; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -697,10 +697,12 @@ public BackgroundService getMultipartUploadCleanupService() { return multipartUploadCleanupService; } + @Override public SstFilteringService getSnapshotSstFilteringService() { return snapshotSstFilteringService; } + @Override public SnapshotDeletingService getSnapshotDeletingService() { return snapshotDeletingService; } @@ -834,8 +836,8 @@ public OmMultipartUploadListParts listParts(String volumeName, //if there are no parts, use the replicationType from the 
open key. if (isBucketFSOptimized(volumeName, bucketName)) { multipartKey = - getMultipartOpenKeyFSO(volumeName, bucketName, keyName, - uploadID); + OMMultipartUploadUtils.getMultipartOpenKey(volumeName, bucketName, keyName, uploadID, + metadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); } OmKeyInfo omKeyInfo = metadataManager.getOpenKeyTable(bucketLayout) @@ -905,13 +907,6 @@ private String getPartName(PartKeyInfo partKeyInfo, String volName, return partName; } - private String getMultipartOpenKeyFSO(String volumeName, String bucketName, - String keyName, String uploadID) throws IOException { - OMMetadataManager metaMgr = metadataManager; - return OMMultipartUploadUtils.getMultipartOpenKeyFSO( - volumeName, bucketName, keyName, uploadID, metaMgr); - } - /** * Returns list of ACLs for given Ozone object. * @@ -1462,10 +1457,6 @@ public static boolean isKeyDeleted(String key, Table keyTable) { && omKeyInfoCacheValue.getCacheValue() == null; } - public static boolean isKeyInCache(String key, Table keyTable) { - return keyTable.getCacheValue(new CacheKey(key)) != null; - } - /** * Helper function for listStatus to find key in TableCache. */ @@ -1684,9 +1675,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, TableIterator> iterator; Iterator, CacheValue>> cacheIter = keyTable.cacheIterator(); - String startCacheKey = OZONE_URI_DELIMITER + volumeName + - OZONE_URI_DELIMITER + bucketName + OZONE_URI_DELIMITER + - ((startKey.equals(OZONE_URI_DELIMITER)) ? "" : startKey); + String startCacheKey = metadataManager.getOzoneKey(volumeName, bucketName, startKey); // First, find key in TableCache listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey, @@ -2113,7 +2102,7 @@ private void setUpdatedContainerLocation(OmKeyInfo keyInfo, } } - @NotNull + @Nonnull private Stream extractContainerIDs(OmKeyInfo keyInfo) { return keyInfo.getKeyLocationVersions().stream() .flatMap(v -> v.getLocationList().stream()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 0b2639e16b3..7981222c4c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -37,6 +37,7 @@ import java.util.NoSuchElementException; import java.util.PriorityQueue; import java.util.TreeMap; +import java.util.function.Predicate; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -117,14 +118,17 @@ public static class DbTableIter implements ? extends Table.KeyValue> tableIterator; private final Table table; - private HeapEntry currentKey; + private HeapEntry currentEntry; + private Predicate doesKeyExistInCache; DbTableIter(int entryIteratorId, Table table, - String prefixKey, String startKey) throws IOException { + String prefixKey, String startKey, + Predicate doesKeyExistInCache) throws IOException { this.entryIteratorId = entryIteratorId; this.table = table; this.tableIterator = table.iterator(prefixKey); - this.currentKey = null; + this.currentEntry = null; + this.doesKeyExistInCache = doesKeyExistInCache; // only seek for the start key if the start key is lexicographically // after the prefix key. 
For example @@ -141,11 +145,11 @@ public static class DbTableIter implements } private void getNextKey() throws IOException { - while (tableIterator.hasNext() && currentKey == null) { + while (tableIterator.hasNext() && currentEntry == null) { Table.KeyValue entry = tableIterator.next(); String entryKey = entry.getKey(); - if (!KeyManagerImpl.isKeyInCache(entryKey, table)) { - currentKey = new HeapEntry(entryIteratorId, + if (!doesKeyExistInCache.test(entryKey)) { + currentEntry = new HeapEntry(entryIteratorId, table.getName(), entryKey, entry.getValue()); } } @@ -157,13 +161,13 @@ public boolean hasNext() { } catch (IOException t) { throw new UncheckedIOException(t); } - return currentKey != null; + return currentEntry != null; } public HeapEntry next() { if (hasNext()) { - HeapEntry ret = currentKey; - currentKey = null; + HeapEntry ret = currentEntry; + currentEntry = null; return ret; } throw new NoSuchElementException(); @@ -186,7 +190,6 @@ public static class CacheIter private final String prefixKey; private final String startKey; private final String tableName; - private final int entryIteratorId; CacheIter(int entryIteratorId, String tableName, @@ -194,7 +197,6 @@ public static class CacheIter CacheValue>> cacheIter, String startKey, String prefixKey) { this.cacheKeyMap = new TreeMap<>(); - this.startKey = startKey; this.prefixKey = prefixKey; this.tableName = tableName; @@ -202,7 +204,7 @@ public static class CacheIter populateCacheMap(cacheIter); - cacheCreatedKeyIter = cacheKeyMap.entrySet().iterator(); + cacheCreatedKeyIter = cacheKeyMap.entrySet().stream().filter(e -> e.getValue() != null).iterator(); } private void populateCacheMap(Iterator, @@ -236,6 +238,10 @@ private void populateCacheMap(Iterator, } } + public boolean doesKeyExistInCache(String key) { + return cacheKeyMap.containsKey(key); + } + public boolean hasNext() { return cacheCreatedKeyIter.hasNext(); } @@ -292,11 +298,13 @@ public static class MinHeapIterator implements ClosableIterator { try { int iteratorId = 0; for (Table table : tables) { - iterators.add(new CacheIter<>(iteratorId, table.getName(), - table.cacheIterator(), startKey, prefixKey)); + CacheIter cacheIter = new CacheIter<>(iteratorId, table.getName(), + table.cacheIterator(), startKey, prefixKey); + Predicate doesKeyExistInCache = cacheIter::doesKeyExistInCache; + iterators.add(cacheIter); iteratorId++; iterators.add(new DbTableIter<>(iteratorId, table, prefixKey, - startKey)); + startKey, doesKeyExistInCache)); iteratorId++; } } finally { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 2a7771fe60a..cc8acc48340 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -36,7 +36,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; -import org.jetbrains.annotations.NotNull; +import com.google.common.base.Preconditions; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -241,9 +242,9 @@ public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush) long startTime = System.currentTimeMillis(); long pauseCounter = PAUSE_COUNTER.incrementAndGet(); - // Pause compactions, Copy/link files and get checkpoint. 
try { LOG.info("Compaction pausing {} started.", pauseCounter); + // Pause compactions, Copy/link files and get checkpoint. differ.incrementTarballRequestCount(); FileUtils.copyDirectory(compactionLogDir.getOriginalDir(), compactionLogDir.getTmpDir()); @@ -252,13 +253,9 @@ public DBCheckpoint getCheckpoint(Path tmpdir, boolean flush) checkpoint = getDbStore().getCheckpoint(flush); } finally { // Unpause the compaction threads. - synchronized (getDbStore().getRocksDBCheckpointDiffer()) { - differ.decrementTarballRequestCount(); - differ.notifyAll(); - long elapsedTime = System.currentTimeMillis() - startTime; - LOG.info("Compaction pausing {} ended. Elapsed ms: {}", - pauseCounter, elapsedTime); - } + differ.decrementTarballRequestCountAndNotify(); + long elapsedTime = System.currentTimeMillis() - startTime; + LOG.info("Compaction pausing {} ended. Elapsed ms: {}", pauseCounter, elapsedTime); } return checkpoint; } @@ -615,7 +612,7 @@ private void writeFilesToArchive( } } - @NotNull + @Nonnull private static Path getMetaDirPath(Path checkpointLocation) { // This check is done to take care of findbugs else below getParent() // should not be null. @@ -644,29 +641,35 @@ public BootstrapStateHandler.Lock getBootstrapStateLock() { } static class Lock extends BootstrapStateHandler.Lock { - private final BootstrapStateHandler keyDeletingService; - private final BootstrapStateHandler sstFilteringService; - private final BootstrapStateHandler rocksDbCheckpointDiffer; - private final BootstrapStateHandler snapshotDeletingService; + private final List locks; private final OzoneManager om; Lock(OzoneManager om) { + Preconditions.checkNotNull(om); + Preconditions.checkNotNull(om.getKeyManager()); + Preconditions.checkNotNull(om.getMetadataManager()); + Preconditions.checkNotNull(om.getMetadataManager().getStore()); + this.om = om; - keyDeletingService = om.getKeyManager().getDeletingService(); - sstFilteringService = om.getKeyManager().getSnapshotSstFilteringService(); - rocksDbCheckpointDiffer = om.getMetadataManager().getStore() - .getRocksDBCheckpointDiffer(); - snapshotDeletingService = om.getKeyManager().getSnapshotDeletingService(); + + locks = Stream.of( + om.getKeyManager().getDeletingService(), + om.getKeyManager().getSnapshotSstFilteringService(), + om.getMetadataManager().getStore().getRocksDBCheckpointDiffer(), + om.getKeyManager().getSnapshotDeletingService() + ) + .filter(Objects::nonNull) + .map(BootstrapStateHandler::getBootstrapStateLock) + .collect(Collectors.toList()); } @Override public BootstrapStateHandler.Lock lock() throws InterruptedException { // First lock all the handlers. - keyDeletingService.getBootstrapStateLock().lock(); - sstFilteringService.getBootstrapStateLock().lock(); - rocksDbCheckpointDiffer.getBootstrapStateLock().lock(); - snapshotDeletingService.getBootstrapStateLock().lock(); + for (BootstrapStateHandler.Lock lock : locks) { + lock.lock(); + } // Then wait for the double buffer to be flushed. 
om.awaitDoubleBufferFlush(); @@ -675,10 +678,7 @@ public BootstrapStateHandler.Lock lock() @Override public void unlock() { - snapshotDeletingService.getBootstrapStateLock().unlock(); - rocksDbCheckpointDiffer.getBootstrapStateLock().unlock(); - sstFilteringService.getBootstrapStateLock().unlock(); - keyDeletingService.getBootstrapStateLock().unlock(); + locks.forEach(BootstrapStateHandler.Lock::unlock); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java index 5bc1d7be6d3..f68789b5394 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.Optional; -import com.google.common.base.Optional; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java index 1f1b4066765..1d25a49fc56 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java @@ -25,10 +25,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_ACCESS_ID; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_AUTHORIZER_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND; -import static org.apache.hadoop.ozone.om.multitenant.AccessPolicy.AccessGrantType.ALLOW; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; import java.io.IOException; import java.util.ArrayList; @@ -37,10 +33,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; - +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -57,7 +50,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo; import org.apache.hadoop.ozone.om.helpers.TenantUserList; -import org.apache.hadoop.ozone.om.multitenant.AccessPolicy; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLockImpl; import org.apache.hadoop.ozone.om.multitenant.BucketNameSpace; @@ -68,18 +60,16 @@ import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Policy; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Role; import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; -import org.apache.hadoop.ozone.om.multitenant.OzoneOwnerPrincipal; import org.apache.hadoop.ozone.om.multitenant.OzoneTenant; -import 
org.apache.hadoop.ozone.om.multitenant.RangerAccessPolicy; import org.apache.hadoop.ozone.om.multitenant.RangerClientMultiTenantAccessController; import org.apache.hadoop.ozone.om.multitenant.Tenant; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserAccessIdInfo; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** @@ -833,7 +823,7 @@ public Optional getTenantForAccessID(String accessID) OmDBAccessIdInfo omDBAccessIdInfo = omMetadataManager.getTenantAccessIdTable().get(accessID); if (omDBAccessIdInfo == null) { - return Optional.absent(); + return Optional.empty(); } return Optional.of(omDBAccessIdInfo.getTenantId()); } @@ -853,24 +843,6 @@ private String getTenantForAccessIDThrowIfNotFound(String accessId) return optionalTenant.get(); } - // TODO: This policy doesn't seem necessary as the bucket-level policy has - // already granted the key-level access. - // Not sure if that is the intended behavior in Ranger though. - // Still, could add this KeyAccess policy as well in Ranger, doesn't hurt. - private AccessPolicy newDefaultKeyAccessPolicy(String volumeName, - String bucketName) throws IOException { - AccessPolicy policy = new RangerAccessPolicy( - // principal already contains volume name - volumeName + "-KeyAccess"); - - OzoneObjInfo obj = OzoneObjInfo.Builder.newBuilder() - .setResType(KEY).setStoreType(OZONE).setVolumeName(volumeName) - .setBucketName("*").setKeyName("*").build(); - // Bucket owners should have ALL permission on their keys - policy.addAccessPolicyElem(obj, new OzoneOwnerPrincipal(), ALL, ALLOW); - return policy; - } - public OzoneConfiguration getConf() { return conf; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index 7ac97a538ca..d118e2f4ecc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -108,10 +108,10 @@ public static void unregister() { private MutableRate createRatisRequestLatencyNs; @Metric(about = "Convert ratis response to om response nano seconds") - private MutableRate createOmResoonseLatencyNs; + private MutableRate createOmResponseLatencyNs; @Metric(about = "Ratis local command execution latency in nano seconds") - private MutableRate validateAndUpdateCacneLatencyNs; + private MutableRate validateAndUpdateCacheLatencyNs; @Metric(about = "ACLs check latency in listKeys") private MutableRate listKeysAclCheckLatencyNs; @@ -209,11 +209,11 @@ public MutableRate getCreateRatisRequestLatencyNs() { } public MutableRate getCreateOmResponseLatencyNs() { - return createOmResoonseLatencyNs; + return createOmResponseLatencyNs; } - public MutableRate getValidateAndUpdateCacneLatencyNs() { - return validateAndUpdateCacneLatencyNs; + public MutableRate getValidateAndUpdateCacheLatencyNs() { + return validateAndUpdateCacheLatencyNs; } public MutableRate getListKeysAclCheckLatencyNs() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index cd0382f599d..6bcefc47cb7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -88,6 +88,7 @@ import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; @@ -319,6 +320,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, private final Map tableCacheMetricsMap = new HashMap<>(); private SnapshotChainManager snapshotChainManager; + private final S3Batcher s3Batcher = new S3SecretBatcher(); /** * OmMetadataManagerImpl constructor. @@ -877,6 +879,20 @@ public String getMultipartKey(String volume, String bucket, String key, return OmMultipartUpload.getDbKey(volume, bucket, key, uploadId); } + @Override + public String getMultipartKeyFSO(String volume, String bucket, String key, String uploadId) throws IOException { + final long volumeId = getVolumeId(volume); + final long bucketId = getBucketId(volume, + bucket); + long parentId = + OMFileRequest.getParentID(volumeId, bucketId, key, this); + + String fileName = OzoneFSUtils.getFileName(key); + + return getMultipartKey(volumeId, bucketId, parentId, + fileName, uploadId); + } + /** * Returns the OzoneManagerLock used on Metadata DB. * @@ -1046,10 +1062,13 @@ private boolean isKeyPresentInTableCache(String keyPrefix, */ private boolean isKeyPresentInTable(String keyPrefix, Table table) - throws IOException { + throws IOException { try (TableIterator> - keyIter = table.iterator()) { - KeyValue kv = keyIter.seek(keyPrefix); + keyIter = table.iterator(keyPrefix)) { + KeyValue kv = null; + if (keyIter.hasNext()) { + kv = keyIter.next(); + } // Iterate through all the entries in the table which start with // the current bucket's prefix. 
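// Note on the change above: table.iterator(keyPrefix) is scoped to keys that start with keyPrefix,
// so the first next() (if any) already yields the first matching entry and no explicit seek is
// needed; an exhausted iterator simply means no key with this prefix exists.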
@@ -1449,8 +1468,8 @@ public List listSnapshot( bucketName, snapshotInfoTable)) { try { while (snapshotIterator.hasNext() && maxListResult > 0) { - SnapshotInfo snapshotInfo = (SnapshotInfo) snapshotIterator.next() - .getValue(); + SnapshotInfo snapshotInfo = + (SnapshotInfo) snapshotIterator.next().getValue(); if (!snapshotInfo.getName().equals(prevSnapshot)) { snapshotInfos.add(snapshotInfo); maxListResult--; @@ -2029,25 +2048,7 @@ public void revokeSecret(String kerberosId) throws IOException { @Override public S3Batcher batcher() { - return new S3Batcher() { - @Override - public void addWithBatch(AutoCloseable batchOperator, - String id, S3SecretValue s3SecretValue) - throws IOException { - if (batchOperator instanceof BatchOperation) { - s3SecretTable.putWithBatch((BatchOperation) batchOperator, - id, s3SecretValue); - } - } - - @Override - public void deleteWithBatch(AutoCloseable batchOperator, String id) - throws IOException { - if (batchOperator instanceof BatchOperation) { - s3SecretTable.deleteWithBatch((BatchOperation) batchOperator, id); - } - } - }; + return s3Batcher; } @Override @@ -2259,4 +2260,23 @@ public boolean containsIncompleteMPUs(String volume, String bucket) return false; } + + private final class S3SecretBatcher implements S3Batcher { + @Override + public void addWithBatch(AutoCloseable batchOperator, String id, S3SecretValue s3SecretValue) + throws IOException { + if (batchOperator instanceof BatchOperation) { + s3SecretTable.putWithBatch((BatchOperation) batchOperator, + id, s3SecretValue); + } + } + + @Override + public void deleteWithBatch(AutoCloseable batchOperator, String id) + throws IOException { + if (batchOperator instanceof BatchOperation) { + s3SecretTable.deleteWithBatch((BatchOperation) batchOperator, id); + } + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 6e9af3960ca..2dab56ede67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -73,7 +73,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.commons.lang3.StringUtils.isBlank; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneListStatusHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneListStatusHelper.java index 9735ea209d9..d93e1db736a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneListStatusHelper.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneListStatusHelper.java @@ -85,21 +85,17 @@ public Collection listStatusFSO(OmKeyArgs args, String startKey, long numEntries, String clientAddress, boolean allowPartialPrefixes) throws IOException { Preconditions.checkNotNull(args, "Key args can not be null"); - final String volumeName = args.getVolumeName(); final String bucketName = args.getBucketName(); String keyName = args.getKeyName(); String prefixKey = keyName; - final String volumeKey = metadataManager.getVolumeKey(volumeName); final String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - final OmVolumeArgs volumeInfo = metadataManager.getVolumeTable() 
.get(volumeKey); final OmBucketInfo omBucketInfo = metadataManager.getBucketTable() .get(bucketKey); - if (volumeInfo == null || omBucketInfo == null) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("%s does not exist.", (volumeInfo == null) ? @@ -109,16 +105,9 @@ public Collection listStatusFSO(OmKeyArgs args, return new ArrayList<>(); } - // Determine if the prefixKey is determined from the startKey - // if the keyName is null if (StringUtils.isNotBlank(startKey)) { if (StringUtils.isNotBlank(keyName)) { - if (!OzoneFSUtils.isSibling(keyName, startKey) && - !OzoneFSUtils.isImmediateChild(keyName, startKey)) { - if (LOG.isDebugEnabled()) { - LOG.debug("StartKey {} is not an immediate child or not a sibling" - + " of keyName {}. Returns empty list", startKey, keyName); - } + if (!validateStartKey(startKey, keyName)) { return new ArrayList<>(); } } else { @@ -131,10 +120,8 @@ public Collection listStatusFSO(OmKeyArgs args, .build(); } } - OzoneFileStatus fileStatus = getStatusHelper.apply(args, clientAddress, allowPartialPrefixes); - String dbPrefixKey; if (fileStatus == null) { // if the file status is null, prefix is a not a valid filesystem path @@ -155,19 +142,65 @@ public Collection listStatusFSO(OmKeyArgs args, throw ome; } } else { - // If the keyname is a file just return one entry + // If the keyName is a file just return one entry if partial prefixes are + // not allowed. + // If partial prefixes are allowed, the found file should also be + // considered as a prefix. if (fileStatus.isFile()) { - return Collections.singletonList(fileStatus); + if (!allowPartialPrefixes) { + return Collections.singletonList(fileStatus); + } else { + try { + dbPrefixKey = getDbKey(keyName, args, volumeInfo, omBucketInfo); + prefixKey = OzoneFSUtils.getParentDir(keyName); + } catch (OMException ome) { + if (ome.getResult() == FILE_NOT_FOUND) { + // the parent dir cannot be found return null list + if (LOG.isDebugEnabled()) { + LOG.debug("Parent directory of keyName:{} does not exist." + + "Returns empty list", keyName); + } + return new ArrayList<>(); + } + throw ome; + } + } + } else { + // fetch the db key based on parent prefix id. + long id = getId(fileStatus, omBucketInfo); + final long volumeId = volumeInfo.getObjectID(); + final long bucketId = omBucketInfo.getObjectID(); + dbPrefixKey = + metadataManager.getOzonePathKey(volumeId, bucketId, id, ""); } + } + String startKeyPrefix = getStartKeyPrefixIfPresent(args, startKey, volumeInfo, omBucketInfo); + TreeMap map = + getSortedEntries(numEntries, prefixKey, dbPrefixKey, startKeyPrefix, omBucketInfo); + + return map.values().stream().filter(e -> e != null).collect( + Collectors.toList()); + } - // fetch the db key based on parent prefix id. - long id = getId(fileStatus, omBucketInfo); - final long volumeId = volumeInfo.getObjectID(); - final long bucketId = omBucketInfo.getObjectID(); - dbPrefixKey = metadataManager.getOzonePathKey(volumeId, bucketId, - id, ""); + /** + * Determine if the prefixKey is determined from the startKey + * if the keyName is null. + */ + private static boolean validateStartKey( + String startKey, String keyName) { + if (!OzoneFSUtils.isSibling(keyName, startKey) && + !OzoneFSUtils.isImmediateChild(keyName, startKey)) { + if (LOG.isDebugEnabled()) { + LOG.debug("StartKey {} is not an immediate child or not a sibling" + + " of keyName {}. 
Returns empty list", startKey, keyName); + } + return false; } + return true; + } + private String getStartKeyPrefixIfPresent(OmKeyArgs args, String startKey, + OmVolumeArgs volumeInfo, OmBucketInfo omBucketInfo) throws IOException { // Determine startKeyPrefix for DB iteration String startKeyPrefix = ""; try { @@ -179,41 +212,49 @@ public Collection listStatusFSO(OmKeyArgs args, throw ome; } } + return startKeyPrefix; + } - TreeMap map = new TreeMap<>(); - - BucketLayout bucketLayout = omBucketInfo.getBucketLayout(); + /** + * fetch the sorted output using a min heap iterator where + * every remove from the heap will give the smallest entry and return + * a treemap. + */ + private TreeMap getSortedEntries(long numEntries, + String prefixKey, String dbPrefixKey, String startKeyPrefix, + OmBucketInfo bucketInfo) throws IOException { + String volumeName = bucketInfo.getVolumeName(); + String bucketName = bucketInfo.getBucketName(); + BucketLayout bucketLayout = bucketInfo.getBucketLayout(); ReplicationConfig replication = - Optional.ofNullable(omBucketInfo.getDefaultReplicationConfig()) + Optional.ofNullable(bucketInfo.getDefaultReplicationConfig()) .map(DefaultReplicationConfig::getReplicationConfig) .orElse(omDefaultReplication); - // fetch the sorted output using a min heap iterator where - // every remove from the heap will give the smallest entry. - try (ListIterator.MinHeapIterator heapIterator = - new ListIterator.MinHeapIterator(metadataManager, dbPrefixKey, - bucketLayout, startKeyPrefix, volumeName, bucketName)) { + TreeMap map = new TreeMap<>(); + try ( + ListIterator.MinHeapIterator heapIterator = new ListIterator.MinHeapIterator( + metadataManager, dbPrefixKey, bucketLayout, startKeyPrefix, + volumeName, bucketName)) { try { while (map.size() < numEntries && heapIterator.hasNext()) { ListIterator.HeapEntry entry = heapIterator.next(); - OzoneFileStatus status = getStatus(prefixKey, - scmBlockSize, volumeName, bucketName, replication, entry); + OzoneFileStatus status = getStatus(prefixKey, scmBlockSize, volumeName, bucketName, + replication, entry); // Caution: DO NOT use putIfAbsent. putIfAbsent undesirably overwrites // the value with `status` when the existing value in the map is null. 
if (!map.containsKey(entry.getKey())) { map.put(entry.getKey(), status); } } + return map; } catch (NoSuchElementException e) { throw new IOException(e); } catch (UncheckedIOException e) { throw e.getCause(); } } - - return map.values().stream().filter(e -> e != null).collect( - Collectors.toList()); } private OzoneFileStatus getStatus(String prefixPath, long scmBlockSz, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 36106d50be0..4b654e3d195 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -42,6 +42,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.StringTokenizer; import java.util.Timer; @@ -52,7 +53,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import com.google.common.base.Optional; import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; @@ -3586,7 +3586,7 @@ S3VolumeContext getS3VolumeContext(boolean skipChecks) throws IOException { // If S3 Multi-Tenancy is not enabled, all S3 requests will be redirected // to the default s3v for compatibility final Optional optionalTenantId = isS3MultiTenancyEnabled() ? - multiTenantManager.getTenantForAccessID(accessId) : Optional.absent(); + multiTenantManager.getTenantForAccessID(accessId) : Optional.empty(); if (!optionalTenantId.isPresent()) { final UserGroupInformation s3gUGI = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index 8c0b9150c33..d801d1dbf33 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -115,6 +116,26 @@ public List getAcl(OzoneObj obj) throws IOException { return EMPTY_ACL_LIST; } + @VisibleForTesting + public OmPrefixInfo getPrefixInfo(OzoneObj obj) throws IOException { + validateOzoneObj(obj); + String prefixPath = obj.getPath(); + metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); + try { + String longestPrefix = prefixTree.getLongestPrefix(prefixPath); + if (prefixPath.equals(longestPrefix)) { + RadixNode lastNode = + prefixTree.getLastNodeInPrefixPath(prefixPath); + if (lastNode != null && lastNode.getValue() != null) { + return lastNode.getValue(); + } + } + } finally { + metadataManager.getLock().releaseReadLock(PREFIX_LOCK, prefixPath); + } + return null; + } + /** * Check access for given ozoneObject. 
* @@ -222,40 +243,39 @@ public OMPrefixAclOpResult addAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, } boolean changed = prefixInfo.addAcl(ozoneAcl); - if (changed) { - if (newPrefix) { - inheritParentAcl(ozoneObj, prefixInfo); - } - // update the in-memory prefix tree - prefixTree.insert(ozoneObj.getPath(), prefixInfo); + // Update the in-memory prefix tree regardless whether the ACL is changed. + // Under OM HA, update ID of the prefix info is updated for every request. + if (newPrefix) { + inheritParentAcl(ozoneObj, prefixInfo); + } + // update the in-memory prefix tree + prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } + if (!isRatisEnabled) { + metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } return new OMPrefixAclOpResult(prefixInfo, changed); } public OMPrefixAclOpResult removeAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, OmPrefixInfo prefixInfo) throws IOException { - boolean removed = false; - if (prefixInfo != null) { - removed = prefixInfo.removeAcl(ozoneAcl); + if (prefixInfo == null) { + return new OMPrefixAclOpResult(null, false); } - // Nothing is matching to remove. - if (removed) { - // Update in-memory prefix tree. - if (prefixInfo.getAcls().isEmpty()) { - prefixTree.removePrefixPath(ozoneObj.getPath()); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().delete(ozoneObj.getPath()); - } - } else { - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } + boolean removed = prefixInfo.removeAcl(ozoneAcl); + + // Update in-memory prefix tree regardless whether the ACL is changed. + // Under OM HA, update ID of the prefix info is updated for every request. + if (prefixInfo.getAcls().isEmpty()) { + prefixTree.removePrefixPath(ozoneObj.getPath()); + if (!isRatisEnabled) { + metadataManager.getPrefixTable().delete(ozoneObj.getPath()); + } + } else { + prefixTree.insert(ozoneObj.getPath(), prefixInfo); + if (!isRatisEnabled) { + metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } } return new OMPrefixAclOpResult(prefixInfo, removed); @@ -305,12 +325,10 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, } boolean changed = prefixInfo.setAcls(ozoneAcls); - if (changed) { - inheritParentAcl(ozoneObj, prefixInfo); - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } + inheritParentAcl(ozoneObj, prefixInfo); + prefixTree.insert(ozoneObj.getPath(), prefixInfo); + if (!isRatisEnabled) { + metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); } return new OMPrefixAclOpResult(prefixInfo, changed); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java index 4e46adc66b1..195ff816bc5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java @@ -62,8 +62,7 @@ public S3SecretValue getSecret(String kerberosID) throws IOException { // purposely deleted the secret. Hence, we do not have to check the DB. 
return null; } - return new S3SecretValue(cacheValue.getKerberosID(), - cacheValue.getAwsSecret()); + return cacheValue; } S3SecretValue result = s3SecretStore.getSecret(kerberosID); if (result != null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java index 3a15f2e8d54..77ee0d5851f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.util.CacheMetrics; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.HashMap; @@ -77,16 +77,16 @@ static LoadingCache createContainerLocationCache( .expireAfterWrite(ttl, unit) .recordStats() .build(new CacheLoader() { - @NotNull + @Nonnull @Override - public Pipeline load(@NotNull Long key) throws Exception { + public Pipeline load(@Nonnull Long key) throws Exception { return containerClient.getContainerWithPipeline(key).getPipeline(); } - @NotNull + @Nonnull @Override public Map loadAll( - @NotNull Iterable keys) throws Exception { + @Nonnull Iterable keys) throws Exception { return containerClient.getContainerWithPipelineBatch(keys) .stream() .collect(Collectors.toMap( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 5ada6137346..6e1c9da34cb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -460,6 +460,18 @@ List listKeys(String volumeName, String bucketName, String startKey, } } + /** + * Returns a OMRequest builder with specified type. 
+ * @param cmdType type of the request + */ + private OzoneManagerProtocolProtos.OMRequest.Builder + createOMRequest(OzoneManagerProtocolProtos.Type cmdType) throws IOException { + return OzoneManagerProtocolProtos.OMRequest.newBuilder() + .setClientId(CLIENT_ID.toString()) + .setVersion(ClientVersion.CURRENT_VERSION) + .setUserInfo(getUserInfo()) + .setCmdType(cmdType); + } private OzoneManagerProtocolProtos.OMRequest getRenameKeyRequest( @@ -483,12 +495,8 @@ List listKeys(String volumeName, String bucketName, String startKey, OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.RenameKey) .setRenameKeyRequest(renameKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); @@ -549,13 +557,8 @@ private OzoneManagerProtocolProtos.OMRequest getDeleteKeyRequest( OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = - OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.DeleteKey) .setDeleteKeyRequest(deleteKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); @@ -619,12 +622,8 @@ boolean processKeyPath(List keyPathList) { OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.DeleteKeys) .setDeleteKeysRequest(deleteKeysRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKeys) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index d1d971f3f4b..2c1276c43e7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.ratis; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -53,6 +52,7 @@ import org.apache.hadoop.util.Time; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; +import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.function.CheckedRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,36 +94,11 @@ OMClientResponse getResponse() { } } - // Taken unbounded queue, if sync thread is taking too long time, we - // might end up taking huge memory to add entries to the buffer. - // TODO: We can avoid this using unbounded queue and use queue with - // capacity, if queue is full we can wait for sync to be completed to - // add entries. 
But in this also we might block rpc handlers, as we - // clear entries after sync. Or we can come up with a good approach to - // solve this. - private Queue currentBuffer; - private Queue readyBuffer; - - private final Daemon daemon; - private final OMMetadataManager omMetadataManager; - private final AtomicLong flushedTransactionCount = new AtomicLong(0); - private final AtomicLong flushIterations = new AtomicLong(0); - private final AtomicBoolean isRunning = new AtomicBoolean(false); - private final OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics; - private long maxFlushedTransactionsInOneIteration; - - private final Consumer updateLastAppliedIndex; - private final boolean isRatisEnabled; - private final boolean isTracingEnabled; - private final Semaphore unFlushedTransactions; - private final FlushNotifier flushNotifier; - private final S3SecretManager s3SecretManager; - /** * Builder for creating OzoneManagerDoubleBuffer. */ - public static class Builder { - private OMMetadataManager mm; + public static final class Builder { + private OMMetadataManager omMetadataManager; private Consumer updateLastAppliedIndex = termIndex -> { }; private boolean isRatisEnabled = false; private boolean isTracingEnabled = false; @@ -132,9 +107,10 @@ public static class Builder { private S3SecretManager s3SecretManager; private String threadPrefix = ""; + private Builder() { } - public Builder setOmMetadataManager(OMMetadataManager omm) { - this.mm = omm; + public Builder setOmMetadataManager(OMMetadataManager omMetadataManager) { + this.omMetadataManager = omMetadataManager; return this; } @@ -153,8 +129,8 @@ public Builder enableTracing(boolean enableTracing) { return this; } - public Builder setmaxUnFlushedTransactionCount(int size) { - this.maxUnFlushedTransactionCount = size; + public Builder setMaxUnFlushedTransactionCount(int maxUnFlushedTransactionCount) { + this.maxUnFlushedTransactionCount = maxUnFlushedTransactionCount; return this; } @@ -174,44 +150,76 @@ public Builder setS3SecretManager(S3SecretManager s3SecretManager) { } public OzoneManagerDoubleBuffer build() { - if (isRatisEnabled) { - Preconditions.checkState(maxUnFlushedTransactionCount > 0L, - "when ratis is enable, maxUnFlushedTransactions " + - "should be bigger than 0"); - } + Preconditions.assertTrue(isRatisEnabled == maxUnFlushedTransactionCount > 0L, + () -> "Ratis is " + (isRatisEnabled ? "enabled" : "disabled") + + " but maxUnFlushedTransactionCount = " + maxUnFlushedTransactionCount); if (flushNotifier == null) { flushNotifier = new FlushNotifier(); } - return new OzoneManagerDoubleBuffer(mm, updateLastAppliedIndex, isRatisEnabled, - isTracingEnabled, maxUnFlushedTransactionCount, - flushNotifier, s3SecretManager, threadPrefix); + return new OzoneManagerDoubleBuffer(this); } } - @SuppressWarnings("checkstyle:parameternumber") - private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, - Consumer updateLastAppliedIndex, - boolean isRatisEnabled, boolean isTracingEnabled, - int maxUnFlushedTransactions, - FlushNotifier flushNotifier, S3SecretManager s3SecretManager, - String threadPrefix) { + public static Builder newBuilder() { + return new Builder(); + } + + static Semaphore newSemaphore(int permits) { + return permits > 0 ? new Semaphore(permits) : null; + } + + private Queue currentBuffer; + private Queue readyBuffer; + /** + * Limit the number of un-flushed transactions for {@link OzoneManagerStateMachine}. + * It is set to null if ratis is disabled; see {@link #isRatisEnabled()}. 
+ */ + private final Semaphore unFlushedTransactions; + + /** To flush the buffers. */ + private final Daemon daemon; + /** Is the {@link #daemon} running? */ + private final AtomicBoolean isRunning = new AtomicBoolean(false); + /** Notify flush operations are completed by the {@link #daemon}. */ + private final FlushNotifier flushNotifier; + + private final OMMetadataManager omMetadataManager; + + private final Consumer updateLastAppliedIndex; + + private final S3SecretManager s3SecretManager; + + private final boolean isTracingEnabled; + + private final OzoneManagerDoubleBufferMetrics metrics = OzoneManagerDoubleBufferMetrics.create(); + + /** Accumulative count (for testing and debug only). */ + private final AtomicLong flushedTransactionCount = new AtomicLong(); + /** The number of flush iterations (for testing and debug only). */ + private final AtomicLong flushIterations = new AtomicLong(); + + private OzoneManagerDoubleBuffer(Builder b) { this.currentBuffer = new ConcurrentLinkedQueue<>(); this.readyBuffer = new ConcurrentLinkedQueue<>(); - this.isRatisEnabled = isRatisEnabled; - this.isTracingEnabled = isTracingEnabled; - this.unFlushedTransactions = new Semaphore(maxUnFlushedTransactions); - this.omMetadataManager = omMetadataManager; - this.updateLastAppliedIndex = updateLastAppliedIndex; - this.ozoneManagerDoubleBufferMetrics = - OzoneManagerDoubleBufferMetrics.create(); - this.flushNotifier = flushNotifier; + + this.omMetadataManager = b.omMetadataManager; + this.s3SecretManager = b.s3SecretManager; + this.updateLastAppliedIndex = b.updateLastAppliedIndex; + this.flushNotifier = b.flushNotifier; + this.unFlushedTransactions = newSemaphore(b.maxUnFlushedTransactionCount); + + this.isTracingEnabled = b.isTracingEnabled; + isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. daemon = new Daemon(this::flushTransactions); - daemon.setName(threadPrefix + "OMDoubleBufferFlushThread"); + daemon.setName(b.threadPrefix + "OMDoubleBufferFlushThread"); daemon.start(); - this.s3SecretManager = s3SecretManager; + } + + private boolean isRatisEnabled() { + return unFlushedTransactions != null; } /** @@ -219,6 +227,7 @@ private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, * blocking until all are available, or the thread is interrupted. */ public void acquireUnFlushedTransactions(int n) throws InterruptedException { + Preconditions.assertTrue(isRatisEnabled(), "Ratis is not enabled"); unFlushedTransactions.acquire(n); } @@ -226,7 +235,7 @@ public void acquireUnFlushedTransactions(int n) throws InterruptedException { * Releases the given number of permits, * returning them to the unFlushedTransactions. */ - public void releaseUnFlushedTransactions(int n) { + void releaseUnFlushedTransactions(int n) { unFlushedTransactions.release(n); } @@ -354,39 +363,34 @@ private void flushBatch(Queue buffer) throws IOException { () -> omMetadataManager.getStore() .commitBatchOperation(batchOperation)); - ozoneManagerDoubleBufferMetrics.updateFlushTime( - Time.monotonicNow() - startTime); + metrics.updateFlushTime(Time.monotonicNow() - startTime); } // Complete futures first and then do other things. // So that handler threads will be released. 
- if (!isRatisEnabled) { + if (!isRatisEnabled()) { buffer.stream() .map(Entry::getResponse) .map(OMClientResponse::getFlushFuture) .forEach(f -> f.complete(null)); } - flushedTransactionCount.addAndGet(flushedTransactionsSize); - flushIterations.incrementAndGet(); - - if (LOG.isDebugEnabled()) { - LOG.debug("Sync iteration {} flushed transactions in this iteration {}", - flushIterations.get(), - flushedTransactionsSize); - } + final long accumulativeCount = flushedTransactionCount.addAndGet(flushedTransactionsSize); + final long flushedIterations = flushIterations.incrementAndGet(); + LOG.debug("Sync iteration: {}, size in this iteration: {}, accumulative count: {}", + flushedIterations, flushedTransactionsSize, accumulativeCount); // Clean up committed transactions. cleanupCache(cleanupEpochs); - if (isRatisEnabled) { + if (isRatisEnabled()) { releaseUnFlushedTransactions(flushedTransactionsSize); } // update the last updated index in OzoneManagerStateMachine. updateLastAppliedIndex.accept(lastTransaction); // set metrics. - updateMetrics(flushedTransactionsSize); + metrics.updateFlush(flushedTransactionsSize); } private String addToBatch(Queue buffer, BatchOperation batchOperation) { @@ -492,25 +496,6 @@ private void cleanupCache(Map> cleanupEpochs) { private synchronized void clearReadyBuffer() { readyBuffer.clear(); } - /** - * Update OzoneManagerDoubleBuffer metrics values. - */ - private void updateMetrics(int flushedTransactionsSize) { - ozoneManagerDoubleBufferMetrics.incrTotalNumOfFlushOperations(); - ozoneManagerDoubleBufferMetrics.incrTotalSizeOfFlushedTransactions( - flushedTransactionsSize); - ozoneManagerDoubleBufferMetrics.setAvgFlushTransactionsInOneIteration( - (float) ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushedTransactions() / - ozoneManagerDoubleBufferMetrics.getTotalNumOfFlushOperations()); - if (maxFlushedTransactionsInOneIteration < flushedTransactionsSize) { - maxFlushedTransactionsInOneIteration = flushedTransactionsSize; - ozoneManagerDoubleBufferMetrics - .setMaxNumberOfTransactionsFlushedInOneIteration( - flushedTransactionsSize); - } - ozoneManagerDoubleBufferMetrics.updateQueueSize(flushedTransactionsSize); - } /** * Stop OM DoubleBuffer flush thread. @@ -520,7 +505,7 @@ private void updateMetrics(int flushedTransactionsSize) { @SuppressWarnings("squid:S2142") public void stop() { stopDaemon(); - ozoneManagerDoubleBufferMetrics.unRegister(); + metrics.unRegister(); } @VisibleForTesting @@ -553,22 +538,6 @@ private void terminate(Throwable t, int status, OMResponse omResponse) { ExitUtils.terminate(status, message.toString(), t, LOG); } - /** - * Returns the flushed transaction count to OM DB. - * @return flushedTransactionCount - */ - public long getFlushedTransactionCount() { - return flushedTransactionCount.get(); - } - - /** - * Returns total number of flush iterations run by sync thread. - * @return flushIterations - */ - public long getFlushIterations() { - return flushIterations.get(); - } - /** * Add OmResponseBufferEntry to buffer. 
*/ @@ -576,7 +545,7 @@ public synchronized void add(OMClientResponse response, TermIndex termIndex) { currentBuffer.add(new Entry(termIndex, response)); notify(); - if (!isRatisEnabled) { + if (!isRatisEnabled()) { response.setFlushFuture(new CompletableFuture<>()); } } @@ -623,8 +592,20 @@ private synchronized void swapCurrentAndReadyBuffer() { } @VisibleForTesting - public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() { - return ozoneManagerDoubleBufferMetrics; + OzoneManagerDoubleBufferMetrics getMetrics() { + return metrics; + } + + /** @return the flushed transaction count to OM DB. */ + @VisibleForTesting + long getFlushedTransactionCountForTesting() { + return flushedTransactionCount.get(); + } + + /** @return total number of flush iterations run by sync thread. */ + @VisibleForTesting + long getFlushIterationsForTesting() { + return flushIterations.get(); } @VisibleForTesting @@ -666,7 +647,7 @@ private CompletableFuture await() { } private int complete() { - Preconditions.checkState(future.complete(count)); + Preconditions.assertTrue(future.complete(count)); return future.join(); } } @@ -681,7 +662,7 @@ synchronized CompletableFuture await() { final int flush = flushCount + 2; LOG.debug("await flush {}", flush); final Entry entry = flushFutures.computeIfAbsent(flush, key -> new Entry()); - Preconditions.checkState(flushFutures.size() <= 2); + Preconditions.assertTrue(flushFutures.size() <= 2); return entry.await(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 45767ec7d07..90fcba40f5d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -20,10 +20,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.ServiceException; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -37,16 +35,13 @@ import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerStateMachineMetrics; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.response.DummyOMClientResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler; import org.apache.hadoop.ozone.protocolPB.RequestHandler; import org.apache.hadoop.security.UserGroupInformation; @@ -90,7 +85,6 @@ public class 
OzoneManagerStateMachine extends BaseStateMachine { LoggerFactory.getLogger(OzoneManagerStateMachine.class); private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); - private final OzoneManagerRatisServer omRatisServer; private final OzoneManager ozoneManager; private RequestHandler handler; private RaftGroupId raftGroupId; @@ -106,14 +100,10 @@ public class OzoneManagerStateMachine extends BaseStateMachine { /** The last index skipped by {@link #notifyTermIndexUpdated(long, long)}. */ private volatile long lastSkippedIndex = RaftLog.INVALID_LOG_INDEX; - private OzoneManagerStateMachineMetrics metrics; - - public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, boolean isTracingEnabled) throws IOException { - this.omRatisServer = ratisServer; this.isTracingEnabled = isTracingEnabled; - this.ozoneManager = omRatisServer.getOzoneManager(); + this.ozoneManager = ratisServer.getOzoneManager(); loadSnapshotInfoFromDB(); this.threadPrefix = ozoneManager.getThreadNamePrefix(); @@ -132,7 +122,6 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, .setNameFormat(threadPrefix + "InstallSnapshotThread").build(); this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor(installSnapshotThreadFactory); - this.metrics = OzoneManagerStateMachineMetrics.create(); } /** @@ -270,7 +259,14 @@ public TransactionContext startTransaction( ctxt.setException(ioe); return ctxt; } - return handleStartTransactionRequests(raftClientRequest, omRequest); + + return TransactionContext.newBuilder() + .setClientRequest(raftClientRequest) + .setStateMachine(this) + .setServerRole(RaftProtos.RaftPeerRole.LEADER) + .setLogData(raftClientRequest.getMessage().getContent()) + .setStateMachineContext(omRequest) + .build(); } @Override @@ -396,11 +392,11 @@ public CompletableFuture query(Message request) { public synchronized void pause() { LOG.info("OzoneManagerStateMachine is pausing"); statePausedCount.incrementAndGet(); - if (getLifeCycleState() == LifeCycle.State.PAUSED) { + final LifeCycle.State state = getLifeCycleState(); + if (state == LifeCycle.State.PAUSED) { return; } - final LifeCycle lc = getLifeCycle(); - if (lc.getCurrentState() != LifeCycle.State.NEW) { + if (state != LifeCycle.State.NEW) { getLifeCycle().transition(LifeCycle.State.PAUSING); getLifeCycle().transition(LifeCycle.State.PAUSED); } @@ -427,13 +423,13 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex, } public OzoneManagerDoubleBuffer buildDoubleBufferForRatis() { - int maxUnflushedTransactionSize = ozoneManager.getConfiguration() + final int maxUnFlushedTransactionCount = ozoneManager.getConfiguration() .getInt(OMConfigKeys.OZONE_OM_UNFLUSHED_TRANSACTION_MAX_COUNT, OMConfigKeys.OZONE_OM_UNFLUSHED_TRANSACTION_MAX_COUNT_DEFAULT); - return new OzoneManagerDoubleBuffer.Builder() + return OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(ozoneManager.getMetadataManager()) .setUpdateLastAppliedIndex(this::updateLastAppliedTermIndex) - .setmaxUnFlushedTransactionCount(maxUnflushedTransactionSize) + .setMaxUnFlushedTransactionCount(maxUnFlushedTransactionCount) .setThreadPrefix(threadPrefix) .setS3SecretManager(ozoneManager.getS3SecretManager()) .enableRatis(true) @@ -499,18 +495,9 @@ public CompletableFuture notifyInstallSnapshotFromLeader( LOG.info("Received install snapshot notification from OM leader: {} with " + "term index: {}", leaderNodeId, firstTermIndexInLog); - CompletableFuture future = CompletableFuture.supplyAsync( + return 
CompletableFuture.supplyAsync( () -> ozoneManager.installSnapshotFromLeader(leaderNodeId), installSnapshotExecutor); - return future; - } - - /** - * Notifies the state machine that the raft peer is no longer leader. - */ - @Override - public void notifyNotLeader(Collection pendingEntries) - throws IOException { } @Override @@ -530,29 +517,10 @@ public void close() { } } - /** - * Handle the RaftClientRequest and return TransactionContext object. - * @param raftClientRequest - * @param omRequest - * @return TransactionContext - */ - private TransactionContext handleStartTransactionRequests( - RaftClientRequest raftClientRequest, OMRequest omRequest) { - - return TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .setLogData(raftClientRequest.getMessage().getContent()) - .setStateMachineContext(omRequest) - .build(); - } - /** * Submits write request to OM and returns the response Message. * @param request OMRequest * @return response from OM - * @throws ServiceException */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { @@ -635,23 +603,10 @@ public OzoneManagerRequestHandler getHandler() { return (OzoneManagerRequestHandler) this.handler; } - @VisibleForTesting - public void setRaftGroupId(RaftGroupId raftGroupId) { - this.raftGroupId = raftGroupId; - } - - @VisibleForTesting - public OzoneManagerStateMachineMetrics getMetrics() { - return this.metrics; - } - public void stop() { ozoneManagerDoubleBuffer.stop(); HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); HadoopExecutors.shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); - if (metrics != null) { - metrics.unRegister(); - } } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java index f77eda081a7..351f1852893 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java @@ -131,6 +131,17 @@ public void updateQueueSize(long size) { queueSize.add(size); } + public void updateFlush(int flushedTransactionsInOneIteration) { + incrTotalNumOfFlushOperations(); + incrTotalSizeOfFlushedTransactions(flushedTransactionsInOneIteration); + setAvgFlushTransactionsInOneIteration(getTotalNumOfFlushedTransactions() / (float)getTotalNumOfFlushOperations()); + final long max = getMaxNumberOfTransactionsFlushedInOneIteration(); + if (flushedTransactionsInOneIteration > max) { + maxNumberOfTransactionsFlushedInOneIteration.incr(flushedTransactionsInOneIteration - max); + } + updateQueueSize(flushedTransactionsInOneIteration); + } + @VisibleForTesting public MutableStat getQueueSize() { return queueSize; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java deleted file mode 100644 index 51d26ef7ac0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation 
(ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis.metrics; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.ozone.OzoneConsts; - -/** - * Class which maintains metrics related to OzoneManager state machine. - */ -@Metrics(about = "OzoneManagerStateMachine Metrics", context = OzoneConsts.OZONE) -public final class OzoneManagerStateMachineMetrics implements MetricsSource { - - private static final String SOURCE_NAME = - OzoneManagerStateMachineMetrics.class.getSimpleName(); - private MetricsRegistry registry; - private static OzoneManagerStateMachineMetrics instance; - - @Metric(about = "Number of apply transactions in applyTransactionMap.") - private MutableCounterLong applyTransactionMapSize; - - @Metric(about = "Number of ratis transactions in ratisTransactionMap.") - private MutableCounterLong ratisTransactionMapSize; - - private OzoneManagerStateMachineMetrics() { - registry = new MetricsRegistry(SOURCE_NAME); - } - - public static synchronized OzoneManagerStateMachineMetrics create() { - if (instance != null) { - return instance; - } else { - MetricsSystem ms = DefaultMetricsSystem.instance(); - OzoneManagerStateMachineMetrics metrics = new OzoneManagerStateMachineMetrics(); - instance = ms.register(SOURCE_NAME, "OzoneManager StateMachine Metrics", - metrics); - return instance; - } - } - - public void updateApplyTransactionMapSize(long size) { - this.applyTransactionMapSize.incr( - Math.negateExact(applyTransactionMapSize.value()) + size); - } - - public void updateRatisTransactionMapSize(long size) { - this.ratisTransactionMapSize.incr( - Math.negateExact(ratisTransactionMapSize.value()) + size); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder rb = collector.addRecord(SOURCE_NAME); - - applyTransactionMapSize.snapshot(rb, all); - ratisTransactionMapSize.snapshot(rb, all); - rb.endRecord(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java index a7ac5b4c0ab..4a5558ed7f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java @@ -51,7 +51,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -294,7 +294,7 @@ static void addRequestClass(Type requestType, * @throws InvocationTargetException if the request class constructor throws * an exception. */ - @NotNull + @Nonnull static OMKeyRequest getRequestInstanceFromMap(OMRequest omRequest, String classKey, BucketLayout bucketLayout) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 3483856bdd3..2698d12f9f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -56,7 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.net.InetAddress; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index de8152af468..7cce3ac456f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -18,10 +18,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -33,6 +30,7 @@ import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.common.BekInfoUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -52,14 +50,12 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import 
org.apache.hadoop.util.Time; @@ -75,7 +71,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES; /** * Handles CreateBucket Request. @@ -116,7 +111,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .setModificationTime(initialTime); if (bucketInfo.hasBeinfo()) { - newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); + newBucketInfo.setBeinfo( + BekInfoUtils.getBekInfo(kmsProvider, bucketInfo.getBeinfo())); } boolean hasSourceVolume = bucketInfo.hasSourceVolume(); @@ -338,38 +334,6 @@ private void addDefaultAcls(OmBucketInfo omBucketInfo, omBucketInfo.setAcls(acls); } - private BucketEncryptionInfoProto getBeinfo( - KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo) - throws IOException { - BucketEncryptionInfoProto bek = bucketInfo.getBeinfo(); - BucketEncryptionInfoProto.Builder bekb = null; - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. - KeyProvider.Metadata metadata = kmsProvider.getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = BucketEncryptionInfoProto.newBuilder() - .setKeyName(bek.getKeyName()) - .setCryptoProtocolVersion(ENCRYPTION_ZONES) - .setSuite(OMPBHelper.convert( - CipherSuite.convert(metadata.getCipher()))); - return bekb.build(); - } - /** * Check namespace quota. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 821c374c2d1..9c7ef1087c1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -23,12 +23,15 @@ import java.util.List; import com.google.common.base.Preconditions; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.common.BekInfoUtils; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; @@ -87,6 +90,18 @@ public OMRequest preExecute(OzoneManager ozoneManager) .getSetBucketPropertyRequest().toBuilder() .setModificationTime(modificationTime); + BucketArgs bucketArgs = + getOmRequest().getSetBucketPropertyRequest().getBucketArgs(); + + if (bucketArgs.hasBekInfo()) { + KeyProviderCryptoExtension kmsProvider = ozoneManager.getKmsProvider(); + BucketArgs.Builder bucketArgsBuilder = + setBucketPropertyRequestBuilder.getBucketArgsBuilder(); + bucketArgsBuilder.setBekInfo( + BekInfoUtils.getBekInfo(kmsProvider, bucketArgs.getBekInfo())); + setBucketPropertyRequestBuilder.setBucketArgs(bucketArgsBuilder.build()); + } + return getOmRequest().toBuilder() .setSetBucketPropertyRequest(setBucketPropertyRequestBuilder) .setUserInfo(getUserInfo()) @@ -190,6 +205,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn bucketInfoBuilder.setDefaultReplicationConfig(defaultReplicationConfig); } + BucketEncryptionKeyInfo bek = omBucketArgs.getBucketEncryptionKeyInfo(); + if (bek != null && bek.getKeyName() != null) { + bucketInfoBuilder.setBucketEncryptionKey(bek); + } + omBucketInfo = bucketInfoBuilder.build(); // Update table cache. 
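Note: both OMBucketCreateRequest.preExecute and OMBucketSetPropertyRequest.preExecute above now delegate bucket-encryption-key validation to the new shared helper org.apache.hadoop.ozone.common.BekInfoUtils. The helper's body is not part of this diff; the sketch below is an assumption that it simply hoists the getBeinfo logic removed from OMBucketCreateRequest, with imports matching the ones deleted from that class.

  public final class BekInfoUtils {
    private BekInfoUtils() { }

    public static BucketEncryptionInfoProto getBekInfo(
        KeyProviderCryptoExtension kmsProvider, BucketEncryptionInfoProto bek)
        throws IOException {
      if (kmsProvider == null) {
        throw new OMException("Invalid KMS provider, check configuration "
            + CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
            OMException.ResultCodes.INVALID_KMS_PROVIDER);
      }
      if (bek.getKeyName() == null) {
        throw new OMException("Bucket encryption key needed.",
            OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
      }
      // Talk to KMS to retrieve the bucket encryption key info.
      KeyProvider.Metadata metadata = kmsProvider.getMetadata(bek.getKeyName());
      if (metadata == null) {
        throw new OMException("Bucket encryption key " + bek.getKeyName()
            + " doesn't exist.",
            OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
      }
      // If the provider supports a pool for EDEKs, this will fill in the pool.
      kmsProvider.warmUpEncryptedKeys(bek.getKeyName());
      return BucketEncryptionInfoProto.newBuilder()
          .setKeyName(bek.getKeyName())
          .setCryptoProtocolVersion(ENCRYPTION_ZONES)
          .setSuite(OMPBHelper.convert(CipherSuite.convert(metadata.getCipher())))
          .build();
    }
  }

With a shared helper of this shape, OMBucketSetPropertyRequest can validate and resolve the key against KMS during preExecute (as shown in the hunk above), instead of only OMBucketCreateRequest supporting encrypted buckets.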
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 1c636aab964..f24dee8ae65 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -264,7 +264,6 @@ public static List getAllParentInfo(OzoneManager ozoneManager, KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) throws IOException { - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); List missingParentInfos = new ArrayList<>(); // The base id is left shifted by 8 bits for creating space to @@ -297,10 +296,6 @@ public static List getAllParentInfo(OzoneManager ozoneManager, objectCount++; missingParentInfos.add(parentKeyInfo); - omMetadataManager.getKeyTable(BucketLayout.DEFAULT).addCacheEntry( - omMetadataManager.getOzoneKey( - volumeName, bucketName, parentKeyInfo.getKeyName()), - parentKeyInfo, trxnLogIndex); } return missingParentInfos; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 250a65c08d8..a1e660691cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -51,12 +51,11 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -732,7 +731,7 @@ public static OzoneFileStatus getOMKeyInfoIfExists( * @param keyName user given key name * @return OmKeyInfo object */ - @NotNull + @Nonnull public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, OmDirectoryInfo dirInfo, String keyName) { @@ -762,7 +761,7 @@ public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, * @param fileName file name * @return absolute path */ - @NotNull + @Nonnull public static String getAbsolutePath(String prefixName, String fileName) { if (Strings.isNullOrEmpty(prefixName)) { return fileName; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java index 1328d8a0497..4f0c9fe6024 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -45,7 +45,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -222,7 +222,7 @@ private void addOpenTableCacheEntry(long trxnLogIndex, openKeyInfo, fileName, trxnLogIndex); } - @NotNull + @Nonnull private OMClientResponse getOmClientResponse(long clientID, OMResponse.Builder omResponse, OmKeyInfo openKeyInfo, OmBucketInfo omBucketInfo, long volumeId) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 154e456414e..d182e4f6c3d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -395,7 +395,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn return omClientResponse; } - @NotNull + @Nonnull protected List getOmKeyLocationInfos( OzoneManager ozoneManager, KeyArgs commitKeyArgs) { List locationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 65a485305d3..0dec9fa459f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; @@ -54,7 +53,6 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getParentId; /** * Handles CreateKey request layout version1. 
@@ -253,16 +251,6 @@ protected String getDBMultipartOpenKey(String volumeName, String bucketName, String keyName, String uploadID, OMMetadataManager omMetadataManager) throws IOException { - - final long volumeId = omMetadataManager.getVolumeId(volumeName); - final long bucketId = omMetadataManager.getBucketId(volumeName, - bucketName); - long parentId = - getParentId(omMetadataManager, volumeName, bucketName, keyName); - - String fileName = OzoneFSUtils.getFileName(keyName); - - return omMetadataManager.getMultipartKey(volumeId, bucketId, parentId, - fileName, uploadID); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index c06aa186cc7..d7cdd363200 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import java.security.GeneralSecurityException; import java.security.PrivilegedExceptionAction; @@ -30,10 +30,10 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -284,7 +284,7 @@ protected String getVolumeOwner(OMMetadataManager omMetadataManager, protected static Optional getFileEncryptionInfo( OzoneManager ozoneManager, OmBucketInfo bucketInfo) throws IOException { - Optional encInfo = Optional.absent(); + Optional encInfo = Optional.empty(); BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo(); if (ezInfo != null) { final String ezKeyName = ezInfo.getKeyName(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 6ddd257e22c..be89da369cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -254,7 +254,7 @@ protected OzoneFileStatus getOzoneKeyStatus( return null; } - @NotNull + @Nonnull @SuppressWarnings("parameternumber") protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java index 8858458f2c8..b90fd15b267 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OmKeysDeleteRequestWithFSO.java @@ -31,9 +31,9 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponseWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -141,7 +141,7 @@ protected long markKeysAsDeletedInCache( return quotaReleased; } - @NotNull @Override + @Nonnull @Override protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager, List omKeyInfoList, List dirList, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index f523a16e872..345886c050b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -22,6 +22,7 @@ import java.nio.file.InvalidPathException; import java.util.Map; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -75,7 +76,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn PrefixManagerImpl prefixManager = (PrefixManagerImpl) ozoneManager.getPrefixManager(); try { + prefixManager.validateOzoneObj(getOzoneObj()); String prefixPath = getOzoneObj().getPath(); + validatePrefixPath(prefixPath); ObjectParser objectParser = new ObjectParser(prefixPath, OzoneManagerProtocolProtos.OzoneObj.ObjectType.PREFIX); volume = objectParser.getVolume(); @@ -94,6 +97,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn lockAcquired = getOmLockDetails().isLockAcquired(); omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath); + if (omPrefixInfo != null) { + omPrefixInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + } try { operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); @@ -112,7 +118,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn "No prefix info for the prefix path: " + prefixPath, OMException.ResultCodes.PREFIX_NOT_FOUND); } - omPrefixInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); // As for remove acl list, for a prefix if after removing acl from // the existing acl list, if list size becomes zero, delete the @@ -155,6 +160,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn return omClientResponse; } + private void validatePrefixPath(String prefixPath) throws OMException { + if (!OzoneFSUtils.isValidName(prefixPath)) { + throw new OMException("Invalid prefix path name: " + prefixPath, + 
OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST); + } + } + /** * Get the path name from the request. * @return path name diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index 53ceab9ebc1..c4fe3f7ab99 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import com.google.common.base.Optional; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -312,11 +311,11 @@ private void updateTableCache(OzoneManager ozoneManager, .isExist(multipartOpenKey)) { omMetadataManager.getOpenKeyTable(bucketLayout) .addCacheEntry(new CacheKey<>(multipartOpenKey), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + CacheValue.get(trxnLogIndex)); } omMetadataManager.getMultipartInfoTable() .addCacheEntry(new CacheKey<>(expiredMPUKeyName), - new CacheValue<>(Optional.absent(), trxnLogIndex)); + CacheValue.get(trxnLogIndex)); long numParts = omMultipartKeyInfo.getPartKeyInfoMap().size(); ozoneManager.getMetrics().incNumExpiredMPUAborted(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 1b52318e4d0..99c98e3b48b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -67,7 +67,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index 35867bb84e8..c224786b108 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -130,22 +130,7 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager, protected String getDBMultipartOpenKey(String volumeName, String bucketName, String keyName, String uploadID, OMMetadataManager omMetadataManager) throws IOException { - - long parentId = - getParentId(omMetadataManager, volumeName, bucketName, keyName); - - String fileName = keyName; - Path filePath = Paths.get(keyName).getFileName(); - if (filePath != null) { - fileName = 
filePath.toString(); - } - - final long volumeId = omMetadataManager.getVolumeId(volumeName); - final long bucketId = omMetadataManager.getBucketId(volumeName, - bucketName); - - return omMetadataManager.getMultipartKey(volumeId, bucketId, - parentId, fileName, uploadID); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java index 1edece52da2..aa7fe46992e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java @@ -113,26 +113,20 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { omClientResponse = ozoneManager.getS3SecretManager() .doUnderLock(accessId, s3SecretManager -> { - // Intentionally set to final so they can only be set once. - final S3SecretValue newS3SecretValue; // Update legacy S3SecretTable, if the accessId entry exists - if (s3SecretManager.hasS3Secret(accessId)) { - // accessId found in S3SecretTable. Update S3SecretTable - LOG.debug("Updating S3SecretTable cache entry"); - // Update S3SecretTable cache entry in this case - newS3SecretValue = new S3SecretValue(accessId, secretKey); - // Set the transactionLogIndex to be used for updating. - newS3SecretValue.setTransactionLogIndex(termIndex.getIndex()); - s3SecretManager - .updateCache(accessId, newS3SecretValue); - } else { + if (!s3SecretManager.hasS3Secret(accessId)) { // If S3SecretTable is not updated, // throw ACCESS_ID_NOT_FOUND exception. throw new OMException("accessId '" + accessId + "' not found.", OMException.ResultCodes.ACCESS_ID_NOT_FOUND); } + // Update S3SecretTable cache entry in this case + // Set the transactionLogIndex to be used for updating. + final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, termIndex.getIndex()); + s3SecretManager.updateCache(accessId, newS3SecretValue); + // Compose response final SetS3SecretResponse.Builder setSecretResponse = SetS3SecretResponse.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java index dcf5688e395..90c27038eb4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java @@ -150,21 +150,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { omClientResponse = ozoneManager.getS3SecretManager() .doUnderLock(accessId, s3SecretManager -> { - S3SecretValue assignS3SecretValue; - S3SecretValue s3SecretValue = - s3SecretManager.getSecret(accessId); + final S3SecretValue assignS3SecretValue; + S3SecretValue s3SecretValue = s3SecretManager.getSecret(accessId); if (s3SecretValue == null) { // Not found in S3SecretTable. if (createIfNotExist) { // Add new entry in this case - assignS3SecretValue = - new S3SecretValue(accessId, awsSecret.get()); - // Set the transactionLogIndex to be used for updating. 
- assignS3SecretValue.setTransactionLogIndex(termIndex.getIndex()); + assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), termIndex.getIndex()); // Add cache entry first. - s3SecretManager.updateCache(accessId, - assignS3SecretValue); + s3SecretManager.updateCache(accessId, assignS3SecretValue); } else { assignS3SecretValue = null; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3SecretRequestHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3SecretRequestHelper.java index 407ee947e59..b27f492e421 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3SecretRequestHelper.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3SecretRequestHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.om.request.s3.security; -import com.google.common.base.Optional; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.om.OMMultiTenantManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -29,6 +28,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.Optional; import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java index a480df26e58..3aa1437c184 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.ozone.om.request.s3.tenant; -import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.ratis.server.protocol.TermIndex; @@ -48,6 +47,7 @@ import java.nio.file.InvalidPathException; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java index a69b357419a..3179a7d0f37 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java @@ -271,10 +271,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - final S3SecretValue s3SecretValue = - new S3SecretValue(accessId, awsSecret); - // Set the transactionLogIndex to be used for updating. 
- s3SecretValue.setTransactionLogIndex(transactionLogIndex); + final S3SecretValue s3SecretValue = S3SecretValue.of(accessId, awsSecret, transactionLogIndex); // Add to tenantAccessIdTable final OmDBAccessIdInfo omDBAccessIdInfo = new OmDBAccessIdInfo.Builder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java index eb876b46fed..89d19d00d27 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.ozone.om.request.s3.tenant; -import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.ratis.server.protocol.TermIndex; @@ -48,6 +47,7 @@ import java.nio.file.InvalidPathException; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java index e5c56095412..74ce1002821 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.ozone.om.request.s3.tenant; -import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.ratis.server.protocol.TermIndex; @@ -52,6 +51,7 @@ import java.nio.file.InvalidPathException; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java index 4d8e466fd34..42c65188782 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java @@ -23,8 +23,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import java.io.IOException; import java.util.UUID; @@ -93,38 +91,12 @@ public static String getMultipartOpenKey(String volumeName, OMMetadataManager omMetadataManager, BucketLayout bucketLayout) throws IOException { if (bucketLayout == 
BucketLayout.FILE_SYSTEM_OPTIMIZED) { - return getMultipartOpenKeyFSO(volumeName, bucketName, - keyName, multipartUploadId, omMetadataManager); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, multipartUploadId); } else { - return getMultipartOpenKey(volumeName, bucketName, - keyName, multipartUploadId, omMetadataManager); + return omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadId); } } - public static String getMultipartOpenKey(String volumeName, - String bucketName, String keyName, String multipartUploadId, - OMMetadataManager omMetadataManager) { - return omMetadataManager.getMultipartKey( - volumeName, bucketName, keyName, multipartUploadId); - } - - public static String getMultipartOpenKeyFSO(String volumeName, - String bucketName, String keyName, String uploadID, - OMMetadataManager metaMgr) throws IOException { - String fileName = OzoneFSUtils.getFileName(keyName); - - final long volumeId = metaMgr.getVolumeId(volumeName); - final long bucketId = metaMgr.getBucketId(volumeName, bucketName); - long parentID = - OMFileRequest.getParentID(volumeId, bucketId, - keyName, metaMgr); - - String multipartKey = metaMgr.getMultipartKey(volumeId, bucketId, - parentID, fileName, uploadID); - - return multipartKey; - } - /** * Check whether key's isMultipartKey flag is set. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java index 9b8270205a8..804d41b11dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java @@ -48,15 +48,11 @@ public ObjectParser(String path, ObjectType objectType) throws OMException { } else if (objectType == ObjectType.BUCKET && tokens.length == 2) { volume = tokens[0]; bucket = tokens[1]; - } else if (objectType == ObjectType.KEY && tokens.length == 3) { + } else if ((objectType == ObjectType.KEY || + objectType == ObjectType.PREFIX) && tokens.length == 3) { volume = tokens[0]; bucket = tokens[1]; key = tokens[2]; - } else if (objectType == ObjectType.PREFIX && tokens.length >= 1) { - volume = tokens[0]; - if (tokens.length >= 2) { - bucket = tokens[1]; - } } else { throw new OMException("Illegal path " + path, OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/DummyOMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/DummyOMClientResponse.java index 0f6a11889f8..b72239fa39f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/DummyOMClientResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/DummyOMClientResponse.java @@ -22,7 +22,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java index 9b9b35a106b..f2053e05d78 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java @@ -29,8 +29,8 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; +import jakarta.annotation.Nullable; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java index c766d990a20..16db6ffbbbd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java @@ -28,7 +28,7 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java index 268787f33a9..f99f3d3c480 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java @@ -26,7 +26,7 @@ .OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java index b9d3cf04f99..99d928a9821 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java @@ -27,7 +27,7 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java index 0a725231925..a48325d90b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java @@ -26,7 +26,7 @@ .OMResponse; 
import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java index d484ce116e0..858e105b9df 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java index 1f39e382084..38a588f8430 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java index 02e62a097e7..8533ef41173 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.file; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java index fd0d9957cad..4e7704fee3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMRecoverLeaseResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMRecoverLeaseResponse.java index fcefa473ff4..690d9bf0d4e 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMRecoverLeaseResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMRecoverLeaseResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java index 7dd89281e0c..febf4a44c3e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; +import jakarta.annotation.Nullable; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index 2d7fef876f5..19f8a354485 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java index 3a48d72d1dc..f90331a6c01 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 758133b4111..bb9562dff21 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -42,7 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index 0de6d27eb5b..685b2969808 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java index c12c3a295d3..13034f77dfb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index 962c5b82b8f..367c286738a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.util.List; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java index cac9e693a23..8c3027bae88 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index 7e5339ee544..0cb0d745d19 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java index b52e4f44761..f52ea1b4ce0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index 719ece21c15..4e9ee756331 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -38,7 +38,7 @@ import java.io.IOException; import java.util.List; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java index 9a97a3ea050..b1077751f4d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_RENAMED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java index 5daff92e74b..8403e72b51e 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponse.java index 895bdfdf916..a0e38195271 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponse.java @@ -20,7 +20,7 @@ import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponseWithFSO.java index 024e9eb6ca3..699c79f4fe6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeySetTimesResponseWithFSO.java @@ -25,9 +25,8 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.jetbrains.annotations.NotNull; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; @@ -44,8 +43,8 @@ public class OMKeySetTimesResponseWithFSO extends OMKeySetTimesResponse { private long bucketId; public OMKeySetTimesResponseWithFSO( - @NotNull OzoneManagerProtocolProtos.OMResponse omResponse, - @NotNull OmKeyInfo omKeyInfo, boolean isDirectory, + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, boolean isDirectory, @Nonnull BucketLayout bucketLayout, @Nonnull long volumeId, @Nonnull long bucketId) { super(omResponse, omKeyInfo, bucketLayout); @@ -61,7 +60,7 @@ public OMKeySetTimesResponseWithFSO( * @param omResponse */ public OMKeySetTimesResponseWithFSO( - @NotNull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, BucketLayout bucketLayout) { super(omResponse, bucketLayout); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index 3c518421999..23f3acd45ac 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java index 43fed7ad150..3a662727b02 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponseWithFSO.java @@ -25,9 +25,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.jetbrains.annotations.NotNull; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -49,10 +48,10 @@ public class OMKeysDeleteResponseWithFSO extends OMKeysDeleteResponse { private long volumeId; public OMKeysDeleteResponseWithFSO( - @NotNull OzoneManagerProtocolProtos.OMResponse omResponse, - @NotNull List keyDeleteList, - @NotNull List dirDeleteList, boolean isRatisEnabled, - @NotNull OmBucketInfo omBucketInfo, @Nonnull long volId, + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull List keyDeleteList, + @Nonnull List dirDeleteList, boolean isRatisEnabled, + @Nonnull OmBucketInfo omBucketInfo, @Nonnull long volId, @Nonnull List dbOpenKeys) { super(omResponse, keyDeleteList, isRatisEnabled, omBucketInfo, dbOpenKeys); this.dirsList = dirDeleteList; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java index f0ea2922a7c..26be8ea3c53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java index 25598e596fc..1c34e1c66a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.Map; 
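The hunks above and below repeat one mechanical migration: imports of javax.annotation.Nonnull/Nullable and org.jetbrains.annotations.NotNull are replaced by their jakarta.annotation equivalents, and @NotNull on constructor parameters becomes @Nonnull. A minimal before/after sketch on a hypothetical response constructor (illustrative only, not part of this patch; it assumes a jakarta.annotation-api dependency that provides Nonnull and Nullable, as the changed imports indicate):

// Before: two annotation sources mixed in the same class.
//   import javax.annotation.Nonnull;
//   import javax.annotation.Nullable;
//   import org.jetbrains.annotations.NotNull;
//
//   public ExampleResponseWithFSO(@NotNull OMResponse omResponse,
//       @Nonnull OmKeyInfo omKeyInfo, @Nullable OmBucketInfo bucketInfo) { ... }

// After: a single source, jakarta.annotation. The class and field names are
// hypothetical; String stand-ins keep the sketch self-contained.
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;

public class ExampleResponseWithFSO {
  private final String omResponse;  // stands in for the protobuf OMResponse
  private final String omKeyInfo;   // stands in for OmKeyInfo
  private final String bucketInfo;  // optional value, hence @Nullable

  public ExampleResponseWithFSO(@Nonnull String omResponse,
      @Nonnull String omKeyInfo, @Nullable String bucketInfo) {
    this.omResponse = omResponse;
    this.omKeyInfo = omKeyInfo;
    this.bucketInfo = bucketInfo;
  }
}

The annotations are compile-time documentation and static-analysis hints only; swapping the import does not change runtime behaviour, which is why these hunks touch nothing but imports and parameter annotations.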
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java index 329090585bf..39bda4204d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java @@ -20,7 +20,7 @@ import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java index 9bcfac86f68..067219bcfa1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java @@ -25,9 +25,8 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.jetbrains.annotations.NotNull; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; @@ -44,8 +43,8 @@ public class OMKeyAclResponseWithFSO extends OMKeyAclResponse { private long bucketId; public OMKeyAclResponseWithFSO( - @NotNull OzoneManagerProtocolProtos.OMResponse omResponse, - @NotNull OmKeyInfo omKeyInfo, boolean isDirectory, + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, boolean isDirectory, @Nonnull BucketLayout bucketLayout, @Nonnull long volumeId, @Nonnull long bucketId) { super(omResponse, omKeyInfo, bucketLayout); @@ -61,7 +60,7 @@ public OMKeyAclResponseWithFSO( * @param omResponse */ public OMKeyAclResponseWithFSO( - @NotNull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, BucketLayout bucketLayout) { super(omResponse, bucketLayout); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java index 288a38fea0f..a4663984a2c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.PREFIX_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/AbstractS3MultipartAbortResponse.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/AbstractS3MultipartAbortResponse.java index 42267b644cf..0b32561f34d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/AbstractS3MultipartAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/AbstractS3MultipartAbortResponse.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.Collections; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3ExpiredMultipartUploadsAbortResponse.java index 363074b596e..d75c3b34665 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3ExpiredMultipartUploadsAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3ExpiredMultipartUploadsAbortResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java index 4d4fca5e673..dd62c6b7a28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java index e92a886bae8..21885d71511 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index f3e2054fadd..20639812772 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java index 7ddb2cea9e3..11bedef6d52 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index f11789b1eff..8199c840d09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -43,8 +43,8 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .Status.OK; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; /** * Response for S3MultipartUploadCommitPart request. 
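The response classes in the hunks above all share the shape visible in their imports: an OMClientResponse subclass tagged with @CleanupTableInfo for the RocksDB tables it touches, whose addToDBBatch(...) applies the already-validated cache change to the DB inside a BatchOperation. A rough sketch of that shape with a hypothetical class (the signatures are inferred from the imports and usage shown above, not quoted from the patch):

import java.io.IOException;

import jakarta.annotation.Nonnull;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;

import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;

/** Hypothetical response that only needs the deleted-key table cleaned up. */
@CleanupTableInfo(cleanupTables = {DELETED_TABLE})
public class ExampleKeyDeleteResponse extends OMClientResponse {

  private final OmKeyInfo deletedKeyInfo;

  public ExampleKeyDeleteResponse(@Nonnull OMResponse omResponse,
      @Nonnull OmKeyInfo deletedKeyInfo) {
    super(omResponse);
    this.deletedKeyInfo = deletedKeyInfo;
  }

  @Override
  public void addToDBBatch(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation) throws IOException {
    // A real response would write the tombstone here, for example via
    // omMetadataManager.getDeletedTable().putWithBatch(batchOperation, ...);
    // the exact key/value construction is omitted from this sketch.
  }
}

The @CleanupTableInfo table names are what allow double-buffer flushes to invalidate the right table caches, which is why each of the FSO and non-FSO response variants above lists its own set of tables.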
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java index e8979c76937..ed2ae34b310 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java @@ -26,8 +26,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 829457cd4bd..3e390b0288e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 3a6e1e39d56..29edfe38253 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/OMSetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/OMSetSecretResponse.java index caef3270fd5..79a6297fce2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/OMSetSecretResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/OMSetSecretResponse.java @@ -29,8 +29,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import 
jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.S3_SECRET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java index f6194734c41..df55af31fd6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java @@ -28,8 +28,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.S3_SECRET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3RevokeSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3RevokeSecretResponse.java index 5391bc2be0b..bea8f6dea8d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3RevokeSecretResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3RevokeSecretResponse.java @@ -26,8 +26,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.S3_SECRET_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMSetRangerServiceVersionResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMSetRangerServiceVersionResponse.java index d1b21bc994c..28dcf52923b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMSetRangerServiceVersionResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMSetRangerServiceVersionResponse.java @@ -22,7 +22,7 @@ import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignAdminResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignAdminResponse.java index 710c74aca5e..6b1a37097be 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignAdminResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignAdminResponse.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TENANT_ACCESS_ID_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignUserAccessIdResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignUserAccessIdResponse.java index ba5ad616964..2b529a15f83 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignUserAccessIdResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantAssignUserAccessIdResponse.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.PRINCIPAL_TO_ACCESS_IDS_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantCreateResponse.java index a845734dd57..a3d282e37c5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantCreateResponse.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TENANT_STATE_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java index 6e7bddc0afa..ea9b613d300 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java @@ -26,8 +26,8 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TENANT_STATE_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeAdminResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeAdminResponse.java index 71d98848397..4cbe758d9f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeAdminResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeAdminResponse.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TENANT_ACCESS_ID_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java index 93f3b8aa67a..a7056eb03b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.PRINCIPAL_TO_ACCESS_IDS_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java index 4a328b0252c..28733555c43 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java index abd967e99c9..a54369610a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java index 
3ee8e2b7c12..eedb5b2c0e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; +import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java index fd4c11b8620..5b3db25114d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotDeleteResponse.java index 654d62bb4b6..3f569d06f6b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotDeleteResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index f4142400d7c..1255e4ae7f4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 863bf5f62b8..b8db58d7fd9 
100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -30,7 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java index ed2953b5415..c018de78cb8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java index f9f0688c3a0..fb8aa73e064 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.io.IOException; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java index b48d7703b00..752171c0719 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java index 5f14aa5dcbc..e129a9b2a25 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java @@ -29,7 +29,7 @@ OzoneManagerStorageProtos.PersistedUserVolumeInfo; import 
org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java index 4e663aa0489..985c3f2b88d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java @@ -32,7 +32,7 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java index c6210254b9e..64573bf6d78 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java @@ -28,7 +28,7 @@ import java.io.IOException; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java index 4387baa521a..f102eb0a5e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Map; import java.util.Optional; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.NoSuchElementException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 99134799de9..41e990097ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -61,7 +61,7 @@ import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.rocksdiff.RocksDiffUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -555,7 +555,7 @@ public SnapshotDiffResponse getSnapshotDiffReport( } } - @NotNull + @Nonnull public static OFSPath getSnapshotRootPath(String volume, String bucket) { org.apache.hadoop.fs.Path bucketPath = new org.apache.hadoop.fs.Path( OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + bucket); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index a4ec2c2f200..cf9bb4f0bbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -62,21 +62,20 @@ import org.slf4j.LoggerFactory; /** - * This class is the server-side translator that forwards requests received on - * {@link OzoneManagerProtocolPB} - * to the OzoneManagerService server implementation. + * This is the server-side translator that forwards requests received + * from {@link OzoneManagerProtocolPB} to {@link OzoneManager}. */ -public class OzoneManagerProtocolServerSideTranslatorPB implements - OzoneManagerProtocolPB { - private static final Logger LOG = LoggerFactory - .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); - private static final String OM_REQUESTS_PACKAGE = - "org.apache.hadoop.ozone"; +public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerProtocolPB { + private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); + private static final String OM_REQUESTS_PACKAGE = "org.apache.hadoop.ozone"; private final OzoneManagerRatisServer omRatisServer; private final RequestHandler handler; - private final boolean isRatisEnabled; private final OzoneManager ozoneManager; + /** + * Only used to handle write requests when ratis is disabled. + * When ratis is enabled, write requests are handled by the state machine. + */ private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final AtomicLong transactionIndex; private final OzoneProtocolMessageDispatcher("OzoneProtocol", metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug); + // TODO: make this injectable for testing... - requestValidations = - new RequestValidations() - .fromPackage(OM_REQUESTS_PACKAGE) - .withinContext( - ValidationContext.of(ozoneManager.getVersionManager(), - ozoneManager.getMetadataManager())) - .load(); + this.requestValidations = new RequestValidations() + .fromPackage(OM_REQUESTS_PACKAGE) + .withinContext(ValidationContext.of(ozoneManager.getVersionManager(), ozoneManager.getMetadataManager())) + .load(); + } + private boolean isRatisEnabled() { + return ozoneManagerDoubleBuffer == null; } /** @@ -197,7 +188,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws } } - if (!isRatisEnabled) { + if (!isRatisEnabled()) { return submitRequestDirectlyToOM(request); } @@ -320,13 +311,7 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { return omClientResponse.getOMResponse(); } - /** - * Create OMResponse from the specified OMRequest and exception. - * - * @param omRequest - * @param exception - * @return OMResponse - */ + /** @return an {@link OMResponse} from the given {@link OMRequest} and the given exception. 
*/ private OMResponse createErrorResponse( OMRequest omRequest, IOException exception) { // Added all write command types here, because in future if any of the @@ -344,7 +329,7 @@ private OMResponse createErrorResponse( } public void stop() { - if (!isRatisEnabled) { + if (ozoneManagerDoubleBuffer != null) { ozoneManagerDoubleBuffer.stop(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 8918cf070b1..b97e2160f95 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -406,7 +406,7 @@ public OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIn OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); return captureLatencyNs( - impl.getPerfMetrics().getValidateAndUpdateCacneLatencyNs(), + impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), () -> { OMClientResponse omClientResponse = omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index 3db3263ec57..43d29c1608a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -17,7 +17,9 @@ package org.apache.hadoop.ozone.om; +import static org.apache.ozone.test.GenericTestUtils.waitFor; import static org.mockito.Mockito.mock; + import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; @@ -28,9 +30,11 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus; import org.apache.hadoop.security.authentication.client.AuthenticationException; import java.io.IOException; +import java.util.concurrent.TimeoutException; /** * Test utility for creating a dummy OM, the associated @@ -38,15 +42,15 @@ */ public final class OmTestManagers { - private OzoneManagerProtocol writeClient; - private OzoneManager om; - private KeyManager keyManager; - private OMMetadataManager metadataManager; + private final OzoneManagerProtocol writeClient; + private final OzoneManager om; + private final KeyManager keyManager; + private final OMMetadataManager metadataManager; private KeyProviderCryptoExtension kmsProvider; - private VolumeManager volumeManager; - private BucketManager bucketManager; - private PrefixManager prefixManager; - private ScmBlockLocationProtocol scmBlockClient; + private final VolumeManager volumeManager; + private final BucketManager bucketManager; + private final PrefixManager prefixManager; + private final ScmBlockLocationProtocol scmBlockClient; public OzoneManager getOzoneManager() { return om; @@ -74,14 +78,14 @@ public ScmBlockLocationProtocol getScmBlockClient() { } public OmTestManagers(OzoneConfiguration conf) - throws 
AuthenticationException, IOException { + throws AuthenticationException, IOException, InterruptedException, TimeoutException { this(conf, null, null); } public OmTestManagers(OzoneConfiguration conf, ScmBlockLocationProtocol blockClient, StorageContainerLocationProtocol containerClient) - throws AuthenticationException, IOException { + throws AuthenticationException, IOException, InterruptedException, TimeoutException { if (containerClient == null) { containerClient = mock(StorageContainerLocationProtocol.class); } @@ -109,6 +113,9 @@ public OmTestManagers(OzoneConfiguration conf, "secretManager", mock(OzoneBlockTokenSecretManager.class)); om.start(); + waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, + 10, 10_000); + writeClient = OzoneClientFactory.getRpcClient(conf) .getObjectStore().getClientProxy().getOzoneManagerClient(); metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index 7919b013b12..f9c9c5ecc81 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -18,7 +18,6 @@ import java.io.File; import java.io.IOException; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; @@ -43,9 +42,11 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.junit.jupiter.api.AfterEach; +import org.apache.ozone.test.OzoneTestBase; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.api.io.TempDir; @@ -58,7 +59,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -66,40 +66,36 @@ * This class tests the Bucket Manager Implementation using Mockito. 
*/ @ExtendWith(MockitoExtension.class) -public class TestBucketManagerImpl { - - @TempDir - private Path folder; +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestBucketManagerImpl extends OzoneTestBase { private OmTestManagers omTestManagers; private OzoneManagerProtocol writeClient; - @AfterEach - public void cleanup() throws Exception { - OzoneManager om = omTestManagers.getOzoneManager(); - om.stop(); - } - - private OzoneConfiguration createNewTestPath() throws IOException { + @BeforeAll + void setup(@TempDir File folder) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.toFile(); - if (!newFolder.exists()) { - assertTrue(newFolder.mkdirs()); - } - ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - return conf; - } + ServerUtils.setOzoneMetaDirPath(conf, folder.toString()); - private void createSampleVol() throws IOException, AuthenticationException { - OzoneConfiguration conf = createNewTestPath(); omTestManagers = new OmTestManagers(conf); writeClient = omTestManagers.getWriteClient(); + } + @AfterAll + void cleanup() throws Exception { + omTestManagers.getOzoneManager().stop(); + } + + public String volumeName() { + return getTestName().toLowerCase(); + } + + private void createSampleVol(String volume) throws IOException { // This is a simple hack for testing, we just test if the volume via a // null check, do not parse the value part. So just write some dummy value. OmVolumeArgs args = OmVolumeArgs.newBuilder() - .setVolume("sample-vol") + .setVolume(volume) .setAdminName("bilbo") .setOwnerName("bilbo") .build(); @@ -107,25 +103,20 @@ private void createSampleVol() throws IOException, AuthenticationException { } @Test - public void testCreateBucketWithoutVolume() throws Exception { - OzoneConfiguration conf = createNewTestPath(); - omTestManagers = new OmTestManagers(conf); - OMException omEx = assertThrows(OMException.class, () -> { - writeClient = omTestManagers.getWriteClient(); - - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") - .setBucketName("bucket-one") - .build(); - writeClient.createBucket(bucketInfo); - }); + void testCreateBucketWithoutVolume() { + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName()) + .setBucketName("bucket-one") + .build(); + OMException omEx = assertThrows(OMException.class, () -> writeClient.createBucket(bucketInfo)); assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult()); assertEquals("Volume doesn't exist", omEx.getMessage()); } @Test - public void testCreateEncryptedBucket() throws Exception { - createSampleVol(); + void testCreateEncryptedBucket() throws Exception { + String volume = volumeName(); + createSampleVol(volume); KeyProviderCryptoExtension kmsProvider = omTestManagers.kmsProviderInit(); String testBekName = "key1"; @@ -138,17 +129,16 @@ public void testCreateEncryptedBucket() throws Exception { BucketManager bucketManager = omTestManagers.getBucketManager(); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setBucketEncryptionKey(new BucketEncryptionKeyInfo.Builder().setKeyName("key1").build()) .build(); writeClient.createBucket(bucketInfo); - assertNotNull(bucketManager.getBucketInfo("sample-vol", - "bucket-one")); + assertNotNull(bucketManager.getBucketInfo(volume, "bucket-one")); OmBucketInfo bucketInfoRead = - bucketManager.getBucketInfo("sample-vol", "bucket-one"); + bucketManager.getBucketInfo(volume, 
"bucket-one"); assertEquals(bucketInfoRead.getEncryptionKeyInfo().getKeyName(), bucketInfo.getEncryptionKeyInfo().getKeyName()); @@ -157,67 +147,62 @@ public void testCreateEncryptedBucket() throws Exception { @Test public void testCreateBucket() throws Exception { - createSampleVol(); + String volume = volumeName(); + createSampleVol(volume); BucketManager bucketManager = omTestManagers.getBucketManager(); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .build(); writeClient.createBucket(bucketInfo); - assertNotNull(bucketManager.getBucketInfo("sample-vol", - "bucket-one")); + assertNotNull(bucketManager.getBucketInfo(volume, "bucket-one")); } @Test public void testCreateAlreadyExistingBucket() throws Exception { - createSampleVol(); + String volume = volumeName(); + createSampleVol(volume); - OMException omEx = assertThrows(OMException.class, () -> { - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") - .setBucketName("bucket-one") - .build(); - writeClient.createBucket(bucketInfo); - writeClient.createBucket(bucketInfo); - }); - assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, - omEx.getResult()); + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volume) + .setBucketName("bucket-one") + .build(); + writeClient.createBucket(bucketInfo); + + OMException omEx = assertThrows(OMException.class, + () -> writeClient.createBucket(bucketInfo)); + assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, omEx.getResult()); assertEquals("Bucket already exist", omEx.getMessage()); } @Test public void testGetBucketInfoForInvalidBucket() throws Exception { - createSampleVol(); - OMException exception = assertThrows(OMException.class, () -> { - BucketManager bucketManager = omTestManagers.getBucketManager(); - bucketManager.getBucketInfo("sample-vol", "bucket-one"); - }); + String volume = volumeName(); + createSampleVol(volume); + + BucketManager bucketManager = omTestManagers.getBucketManager(); + + OMException exception = assertThrows(OMException.class, + () -> bucketManager.getBucketInfo(volume, "bucket-one")); assertThat(exception.getMessage()).contains("Bucket not found"); - assertEquals(ResultCodes.BUCKET_NOT_FOUND, - exception.getResult()); + assertEquals(ResultCodes.BUCKET_NOT_FOUND, exception.getResult()); } @Test - public void testGetBucketInfo() throws Exception { - final String volumeName = "sample-vol"; + void testGetBucketInfo() throws Exception { + final String volumeName = volumeName(); final String bucketName = "bucket-one"; - OzoneConfiguration conf = createNewTestPath(); - omTestManagers = new OmTestManagers(conf); - writeClient = omTestManagers.getWriteClient(); - OMMetadataManager metaMgr = omTestManagers.getMetadataManager(); BucketManager bucketManager = omTestManagers.getBucketManager(); // Check exception thrown when volume does not exist - try { - bucketManager.getBucketInfo(volumeName, bucketName); - fail("Should have thrown OMException"); - } catch (OMException omEx) { - assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult(), - "getBucketInfo() should have thrown " + - "VOLUME_NOT_FOUND as the parent volume is not created!"); - } + OMException omEx = assertThrows(OMException.class, + () -> bucketManager.getBucketInfo(volumeName, bucketName)); + assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult(), + "getBucketInfo() should have thrown " + + "VOLUME_NOT_FOUND as the parent volume is not created!"); + OmBucketInfo bucketInfo = 
OmBucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -235,16 +220,12 @@ public void testGetBucketInfo() throws Exception { writeClient.createVolume(args); // Create bucket createBucket(metaMgr, bucketInfo); + // Check exception thrown when bucket does not exist - try { - bucketManager.getBucketInfo(volumeName, "bucketNotExist"); - fail("Should have thrown OMException"); - } catch (OMException omEx) { - assertEquals( - ResultCodes.BUCKET_NOT_FOUND, omEx.getResult(), - "getBucketInfo() should have thrown BUCKET_NOT_FOUND " + - "as the parent volume exists but bucket doesn't!"); - } + OMException e2 = assertThrows(OMException.class, + () -> bucketManager.getBucketInfo(volumeName, "bucketNotExist")); + assertEquals(ResultCodes.BUCKET_NOT_FOUND, e2.getResult()); + OmBucketInfo result = bucketManager.getBucketInfo(volumeName, bucketName); assertEquals(volumeName, result.getVolumeName()); assertEquals(bucketName, result.getBucketName()); @@ -259,64 +240,68 @@ private void createBucket(OMMetadataManager metadataManager, @Test public void testSetBucketPropertyChangeStorageType() throws Exception { + String volume = volumeName(); + createSampleVol(volume); - createSampleVol(); OMMetadataManager metaMgr = omTestManagers.getMetadataManager(); BucketManager bucketManager = omTestManagers.getBucketManager(); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setStorageType(StorageType.DISK) .build(); createBucket(metaMgr, bucketInfo); OmBucketInfo result = bucketManager.getBucketInfo( - "sample-vol", "bucket-one"); + volume, "bucket-one"); assertEquals(StorageType.DISK, result.getStorageType()); OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setStorageType(StorageType.SSD) .build(); writeClient.setBucketProperty(bucketArgs); OmBucketInfo updatedResult = bucketManager.getBucketInfo( - "sample-vol", "bucket-one"); + volume, "bucket-one"); assertEquals(StorageType.SSD, updatedResult.getStorageType()); } @Test public void testSetBucketPropertyChangeVersioning() throws Exception { - createSampleVol(); + String volume = volumeName(); + createSampleVol(volume); BucketManager bucketManager = omTestManagers.getBucketManager(); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setIsVersionEnabled(false) .build(); writeClient.createBucket(bucketInfo); OmBucketInfo result = bucketManager.getBucketInfo( - "sample-vol", "bucket-one"); + volume, "bucket-one"); assertFalse(result.getIsVersionEnabled()); OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setIsVersionEnabled(true) .build(); writeClient.setBucketProperty(bucketArgs); OmBucketInfo updatedResult = bucketManager.getBucketInfo( - "sample-vol", "bucket-one"); + volume, "bucket-one"); assertTrue(updatedResult.getIsVersionEnabled()); } @Test - public void testDeleteBucket() throws Exception { - createSampleVol(); + void testDeleteBucket() throws Exception { + String volume = volumeName(); + createSampleVol(volume); + BucketManager bucketManager = omTestManagers.getBucketManager(); for (int i = 0; i < 5; i++) { OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-" + i) .build(); 
writeClient.createBucket(bucketInfo); @@ -324,17 +309,13 @@ public void testDeleteBucket() throws Exception { for (int i = 0; i < 5; i++) { assertEquals("bucket-" + i, bucketManager.getBucketInfo( - "sample-vol", "bucket-" + i).getBucketName()); - } - try { - writeClient.deleteBucket("sample-vol", "bucket-1"); - assertNotNull(bucketManager.getBucketInfo( - "sample-vol", "bucket-2")); - } catch (IOException ex) { - fail(ex.getMessage()); + volume, "bucket-" + i).getBucketName()); } + writeClient.deleteBucket(volume, "bucket-1"); + assertNotNull(bucketManager.getBucketInfo(volume, "bucket-2")); + OMException omEx = assertThrows(OMException.class, () -> { - bucketManager.getBucketInfo("sample-vol", "bucket-1"); + bucketManager.getBucketInfo(volume, "bucket-1"); }); assertEquals(ResultCodes.BUCKET_NOT_FOUND, omEx.getResult()); @@ -343,15 +324,17 @@ public void testDeleteBucket() throws Exception { @Test public void testDeleteNonEmptyBucket() throws Exception { - createSampleVol(); + String volume = volumeName(); + createSampleVol(volume); + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .build(); writeClient.createBucket(bucketInfo); //Create keys in bucket OmKeyArgs args1 = new OmKeyArgs.Builder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setKeyName("key-one") .setAcls(Collections.emptyList()) @@ -364,7 +347,7 @@ public void testDeleteNonEmptyBucket() throws Exception { writeClient.commitKey(args1, session1.getId()); OmKeyArgs args2 = new OmKeyArgs.Builder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setKeyName("key-two") .setAcls(Collections.emptyList()) @@ -376,7 +359,7 @@ public void testDeleteNonEmptyBucket() throws Exception { OpenKeySession session2 = writeClient.openKey(args2); writeClient.commitKey(args2, session2.getId()); OMException omEx = assertThrows(OMException.class, () -> { - writeClient.deleteBucket("sample-vol", "bucket-one"); + writeClient.deleteBucket(volume, "bucket-one"); }); assertEquals(ResultCodes.BUCKET_NOT_EMPTY, omEx.getResult()); @@ -385,10 +368,12 @@ public void testDeleteNonEmptyBucket() throws Exception { @Test public void testLinkedBucketResolution() throws Exception { - createSampleVol(); + String volume = volumeName(); + createSampleVol(volume); + ECReplicationConfig ecConfig = new ECReplicationConfig(3, 2); OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("bucket-one") .setDefaultReplicationConfig( new DefaultReplicationConfig( @@ -405,23 +390,23 @@ public void testLinkedBucketResolution() throws Exception { writeClient.createBucket(bucketInfo); OmBucketInfo bucketLinkInfo = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("link-one") - .setSourceVolume("sample-vol") + .setSourceVolume(volume) .setSourceBucket("bucket-one") .build(); writeClient.createBucket(bucketLinkInfo); OmBucketInfo bucketLink2 = OmBucketInfo.newBuilder() - .setVolumeName("sample-vol") + .setVolumeName(volume) .setBucketName("link-two") - .setSourceVolume("sample-vol") + .setSourceVolume(volume) .setSourceBucket("link-one") .build(); writeClient.createBucket(bucketLink2); OmBucketInfo storedLinkBucket = - writeClient.getBucketInfo("sample-vol", "link-two"); + writeClient.getBucketInfo(volume, "link-two"); assertNotNull(storedLinkBucket.getDefaultReplicationConfig(), "Replication config is not 
set"); assertEquals(ecConfig, @@ -432,12 +417,12 @@ public void testLinkedBucketResolution() throws Exception { assertEquals( "link-two", storedLinkBucket.getBucketName()); assertEquals( - "sample-vol", storedLinkBucket.getVolumeName()); + volume, storedLinkBucket.getVolumeName()); assertEquals( "link-one", storedLinkBucket.getSourceBucket()); assertEquals( - "sample-vol", storedLinkBucket.getSourceVolume()); + volume, storedLinkBucket.getSourceVolume()); assertEquals( bucketInfo.getBucketLayout(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index 60cfcd1a2c1..df7f5b67b4e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -19,7 +19,7 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; @@ -76,7 +76,7 @@ public void testErrorReadGroupInputStream() throws Exception { } } - @NotNull + @Nonnull private List createInputStreams(String dataString) { byte[] buf = dataString.getBytes(UTF_8); List streams = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 96b0c138910..6454a77d66f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om; -import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Instant; import java.util.ArrayList; import java.util.HashMap; @@ -31,8 +31,8 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -68,14 +68,16 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -88,8 +90,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.anySet; +import static 
org.mockito.Mockito.anySet; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -97,7 +100,10 @@ /** * Unit test key manager. */ -public class TestKeyManagerUnit { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestKeyManagerUnit extends OzoneTestBase { + + private static final AtomicLong CONTAINER_ID = new AtomicLong(); private OzoneConfiguration configuration; private OMMetadataManager metadataManager; @@ -105,23 +111,16 @@ public class TestKeyManagerUnit { private KeyManagerImpl keyManager; private Instant startDate; - private File testDir; private ScmBlockLocationProtocol blockClient; private OzoneManagerProtocol writeClient; private OzoneManager om; @BeforeAll - public static void setup() { + void setup(@TempDir Path testDir) throws Exception { ExitUtils.disableSystemExit(); - } - - @BeforeEach - public void init() throws Exception { configuration = new OzoneConfiguration(); - testDir = GenericTestUtils.getRandomizedTestDir(); - configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - testDir.toString()); + configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); containerClient = mock(StorageContainerLocationProtocol.class); blockClient = mock(ScmBlockLocationProtocol.class); @@ -131,46 +130,56 @@ public void init() throws Exception { metadataManager = omTestManagers.getMetadataManager(); keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); writeClient = omTestManagers.getWriteClient(); + } + + @BeforeEach + void init() { + reset(blockClient, containerClient); startDate = Instant.ofEpochMilli(Time.now()); } - @AfterEach + @AfterAll public void cleanup() throws Exception { om.stop(); - FileUtils.deleteDirectory(testDir); } @Test public void listMultipartUploadPartsWithZeroUpload() throws IOException { //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); + final String volume = volumeName(); + createBucket(metadataManager, volume, "bucket1"); OmMultipartInfo omMultipartInfo = - initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); + initMultipartUpload(writeClient, volume, "bucket1", "dir/key1"); //WHEN OmMultipartUploadListParts omMultipartUploadListParts = keyManager - .listParts("vol1", "bucket1", "dir/key1", omMultipartInfo.getUploadID(), + .listParts(volume, "bucket1", "dir/key1", omMultipartInfo.getUploadID(), 0, 10); assertEquals(0, omMultipartUploadListParts.getPartInfoList().size()); } + private String volumeName() { + return getTestName(); + } + @Test public void listMultipartUploads() throws IOException { //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); - createBucket(metadataManager, "vol1", "bucket2"); + final String volume = volumeName(); + createBucket(metadataManager, volume, "bucket1"); + createBucket(metadataManager, volume, "bucket2"); - initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); - initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2"); - initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1"); + initMultipartUpload(writeClient, volume, "bucket1", "dir/key1"); + initMultipartUpload(writeClient, volume, "bucket1", "dir/key2"); + initMultipartUpload(writeClient, volume, "bucket2", "dir/key1"); //WHEN OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads("vol1", "bucket1", ""); + keyManager.listMultipartUploads(volume, "bucket1", ""); //THEN List uploads = omMultipartUploadList.getUploads(); 
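// Illustrative sketch, not part of this patch: the shared-fixture pattern the tests above are
// converted to. With @TestInstance(Lifecycle.PER_CLASS) the expensive OzoneManager fixture is
// built once in a non-static @BeforeAll, shared mocks are reset before each test, and each test
// derives a unique resource name from the test method name via OzoneTestBase.getTestName().
// The class name, field names, and method bodies below are hypothetical, for illustration only.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;

import java.nio.file.Path;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.ozone.test.OzoneTestBase;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.io.TempDir;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class SharedOmFixtureExample extends OzoneTestBase {

  private ScmBlockLocationProtocol blockClient;

  @BeforeAll
  void setup(@TempDir Path metaDir) throws Exception {
    blockClient = mock(ScmBlockLocationProtocol.class);
    // start a single OzoneManager against metaDir here, as OmTestManagers does above
  }

  @BeforeEach
  void init() {
    reset(blockClient);               // clean stubbing before every test method
  }

  @Test
  void createsUniqueVolume() {
    String volume = getTestName();    // unique per test method, so tests on the shared OM cannot collide
    // exercise the shared OzoneManager with this volume name
  }
}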
@@ -188,8 +197,8 @@ public void listMultipartUploads() throws IOException { @Test public void listMultipartUploadsWithFewEntriesInCache() throws IOException { - String volume = UUID.randomUUID().toString(); - String bucket = UUID.randomUUID().toString(); + String volume = volumeName(); + String bucket = "bucket"; //GIVEN createBucket(metadataManager, volume, bucket); @@ -276,20 +285,21 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { public void listMultipartUploadsWithPrefix() throws IOException { //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); - createBucket(metadataManager, "vol1", "bucket2"); + final String volumeName = volumeName(); + createBucket(metadataManager, volumeName, "bucket1"); + createBucket(metadataManager, volumeName, "bucket2"); - initMultipartUpload(writeClient, "vol1", "bucket1", "dip/key1"); + initMultipartUpload(writeClient, volumeName, "bucket1", "dip/key1"); - initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); - initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2"); - initMultipartUpload(writeClient, "vol1", "bucket1", "key3"); + initMultipartUpload(writeClient, volumeName, "bucket1", "dir/key1"); + initMultipartUpload(writeClient, volumeName, "bucket1", "dir/key2"); + initMultipartUpload(writeClient, volumeName, "bucket1", "key3"); - initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1"); + initMultipartUpload(writeClient, volumeName, "bucket2", "dir/key1"); //WHEN OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads("vol1", "bucket1", "dir"); + keyManager.listMultipartUploads(volumeName, "bucket1", "dir"); //THEN List uploads = omMultipartUploadList.getUploads(); @@ -357,7 +367,7 @@ public void testGetKeyInfo() throws IOException { final DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails(); final DatanodeDetails dn3 = MockDatanodeDetails.randomDatanodeDetails(); final DatanodeDetails dn4 = MockDatanodeDetails.randomDatanodeDetails(); - final long containerID = 1L; + final long containerID = CONTAINER_ID.incrementAndGet(); Set containerIDs = newHashSet(containerID); final Pipeline pipeline1 = Pipeline.newBuilder() @@ -388,18 +398,19 @@ public void testGetKeyInfo() throws IOException { singletonList(new ContainerWithPipeline(ci, pipeline1)), singletonList(new ContainerWithPipeline(ci, pipeline2))); - insertVolume("volumeOne"); + final String volume = volumeName(); + insertVolume(volume); - insertBucket("volumeOne", "bucketOne"); + insertBucket(volume, "bucketOne"); BlockID blockID1 = new BlockID(containerID, 1L); - insertKey(null, "volumeOne", "bucketOne", "keyOne", blockID1); + insertKey(null, volume, "bucketOne", "keyOne", blockID1); BlockID blockID2 = new BlockID(containerID, 2L); - insertKey(null, "volumeOne", "bucketOne", "keyTwo", blockID2); + insertKey(null, volume, "bucketOne", "keyTwo", blockID2); // 1st call to get key1. OmKeyArgs keyArgs = new Builder() - .setVolumeName("volumeOne") + .setVolumeName(volume) .setBucketName("bucketOne") .setKeyName("keyOne") .build(); @@ -415,7 +426,7 @@ public void testGetKeyInfo() throws IOException { // subsequent call to key2 in same container sound result no scm calls. keyArgs = new Builder() - .setVolumeName("volumeOne") + .setVolumeName(volume) .setBucketName("bucketOne") .setKeyName("keyTwo") .build(); @@ -431,7 +442,7 @@ public void testGetKeyInfo() throws IOException { // Yet, another call with forceCacheUpdate should trigger a call to SCM. 
keyArgs = new Builder() - .setVolumeName("volumeOne") + .setVolumeName(volume) .setBucketName("bucketOne") .setKeyName("keyTwo") .setForceUpdateContainerCacheFromSCM(true) @@ -492,15 +503,16 @@ public void testLookupFileWithDnFailure() throws IOException { when(containerClient.getContainerWithPipelineBatch(containerIDs)) .thenReturn(cps); - insertVolume("volumeOne"); + final String volume = volumeName(); + insertVolume(volume); - insertBucket("volumeOne", "bucketOne"); + insertBucket(volume, "bucketOne"); - insertKey(pipelineOne, "volumeOne", "bucketOne", "keyOne", + insertKey(pipelineOne, volume, "bucketOne", "keyOne", new BlockID(1L, 1L)); final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder() - .setVolumeName("volumeOne") + .setVolumeName(volume) .setBucketName("bucketOne") .setKeyName("keyOne"); @@ -566,7 +578,7 @@ private void insertVolume(String volumeName) throws IOException { @Test public void listStatus() throws Exception { - String volume = "vol"; + String volume = volumeName(); String bucket = "bucket"; String keyPrefix = "key"; String client = "client.host"; @@ -584,19 +596,20 @@ public void listStatus() throws Exception { Set containerIDs = new HashSet<>(); List containersWithPipeline = new ArrayList<>(); for (long i = 1; i <= 10; i++) { + final long containerID = CONTAINER_ID.incrementAndGet(); final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(i, 1L)) + .setBlockID(new BlockID(containerID, 1L)) .setPipeline(pipeline) .setOffset(0) .setLength(256000) .build(); ContainerInfo containerInfo = new ContainerInfo.Builder() - .setContainerID(i) + .setContainerID(containerID) .build(); containersWithPipeline.add( new ContainerWithPipeline(containerInfo, pipeline)); - containerIDs.add(i); + containerIDs.add(containerID); OmKeyInfo keyInfo = new OmKeyInfo.Builder() .setVolumeName(volume) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index f9c78ee63bb..36245dc8741 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -54,8 +54,7 @@ public void testDBDefinition() throws Exception { ArrayList missingDBDefTables = new ArrayList<>(); // Get list of tables from the RocksDB Store - Collection missingOmDBTables = - store.getTableNames().values(); + final Collection missingOmDBTables = new ArrayList<>(store.getTableNames().values()); missingOmDBTables.remove("default"); int countOmDBTables = missingOmDBTables.size(); // Remove the file if it is found in both the datastructures diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index adab44d43f1..0079585a85b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -43,8 +43,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import 
static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -116,12 +116,10 @@ public void testMultiTenancyCheckConfig() { */ private void expectConfigCheckToFail(OzoneManager ozoneManager, OzoneConfiguration conf) { - try { - OMMultiTenantManager.checkAndEnableMultiTenancy(ozoneManager, conf); - fail("Should have thrown RuntimeException"); - } catch (RuntimeException e) { - assertThat(e.getMessage()).contains("Failed to meet"); - } + RuntimeException e = + assertThrows(RuntimeException.class, + () -> OMMultiTenantManager.checkAndEnableMultiTenancy(ozoneManager, conf)); + assertThat(e.getMessage()).contains("Failed to meet"); } /** @@ -176,12 +174,9 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { */ private void expectWriteRequestToFail(OzoneManager om, OMRequest omRequest) throws IOException { - try { - OzoneManagerRatisUtils.createClientRequest(omRequest, om); - fail("Should have thrown OMException"); - } catch (OMException e) { - assertEquals(FEATURE_NOT_ENABLED, e.getResult()); - } + OMException e = + assertThrows(OMException.class, () -> OzoneManagerRatisUtils.createClientRequest(omRequest, om)); + assertEquals(FEATURE_NOT_ENABLED, e.getResult()); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java index 70c204c04c5..4530e526824 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java @@ -33,10 +33,9 @@ import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.Optional; import java.util.concurrent.TimeUnit; - -import com.google.common.base.Optional; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java index 23dd15b610b..ecada6ea284 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java @@ -42,7 +42,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Testing OMStorage class. 
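// Illustrative sketch, not part of this patch: the try/fail-to-assertThrows conversion applied
// throughout the tests above. JUnit 5's assertThrows returns the thrown exception, so result
// codes can be asserted directly instead of inside a catch block. The class name and the
// doForbiddenWrite() method are hypothetical, for illustration only.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.junit.jupiter.api.Test;

class AssertThrowsMigrationExample {

  @Test
  void rejectsWriteToMissingVolume() {
    // Before: try { doForbiddenWrite(); fail("Should have thrown OMException"); }
    //         catch (OMException e) { assertEquals(ResultCodes.VOLUME_NOT_FOUND, e.getResult()); }
    OMException e = assertThrows(OMException.class, this::doForbiddenWrite);
    assertEquals(ResultCodes.VOLUME_NOT_FOUND, e.getResult());
  }

  private void doForbiddenWrite() throws OMException {
    throw new OMException("Volume doesn't exist", ResultCodes.VOLUME_NOT_FOUND);
  }
}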
@@ -103,16 +102,12 @@ public void testNoOmDbDirConfigured() { } @Test - public void testSetOmIdOnNotInitializedStorage() throws Exception { + void testSetOmIdOnNotInitializedStorage() throws Exception { OMStorage storage = new OMStorage(configWithOMDBDir()); assertNotEquals(INITIALIZED, storage.getState()); String omId = "omId"; - try { - storage.setOmId(omId); - } catch (IOException e) { - fail("Can not set OmId on a Storage that is not initialized."); - } + storage.setOmId(omId); assertEquals(omId, storage.getOmId()); assertGetNodeProperties(storage, omId); } @@ -145,16 +140,12 @@ public void testCertSerialIdOperations() throws Exception { } @Test - public void testSetOmNodeIdOnNotInitializedStorage() throws Exception { + void testSetOmNodeIdOnNotInitializedStorage() throws Exception { OMStorage storage = new OMStorage(configWithOMDBDir()); assertNotEquals(INITIALIZED, storage.getState()); String nodeId = "nodeId"; - try { - storage.setOmNodeId(nodeId); - } catch (IOException e) { - fail("Can not set OmNodeId on a Storage that is not initialized."); - } + storage.setOmNodeId(nodeId); assertEquals(nodeId, storage.getOmNodeId()); assertGetNodeProperties(storage, null, nodeId); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMTenantCreateRequest.java index f8bfbe3d579..dc685424886 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMTenantCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMTenantCreateRequest.java @@ -44,8 +44,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.framework; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 79e201b4299..e1ae8f57d15 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; @@ -31,9 +30,12 @@ import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -79,10 +81,10 @@ /** * Unit test ozone snapshot manager. 
*/ -public class TestOmSnapshotManager { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class TestOmSnapshotManager { private OzoneManager om; - private File testDir; private static final String CANDIDATE_DIR_NAME = OM_DB_NAME + SNAPSHOT_CANDIDATE_DIR; private File leaderDir; @@ -94,12 +96,10 @@ public class TestOmSnapshotManager { private File s1File; private File f1File; - @BeforeEach - public void init() throws Exception { + @BeforeAll + void init(@TempDir File tempDir) throws Exception { OzoneConfiguration configuration = new OzoneConfiguration(); - testDir = GenericTestUtils.getRandomizedTestDir(); - configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - testDir.toString()); + configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString()); // Enable filesystem snapshot feature for the test regardless of the default configuration.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); @@ -110,13 +110,11 @@ public void init() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(configuration); om = omTestManagers.getOzoneManager(); - setupData(); } - @AfterEach - public void cleanup() throws Exception { + @AfterAll + void stop() { om.stop(); - FileUtils.deleteDirectory(testDir); } @Test @@ -206,7 +204,8 @@ public void testCloseOnEviction() throws IOException { verify(firstSnapshotStore, timeout(3000).times(1)).close(); } - private void setupData() throws IOException { + @BeforeEach + void setupData(@TempDir File testDir) throws IOException { // Set up the leader with the following files: // leader/db.checkpoints/checkpoint1/f1.sst // leader/db.snapshots/checkpointState/snap1/s1.sst @@ -230,8 +229,7 @@ private void setupData() throws IOException { byte[] dummyData = {0}; // Create dummy leader files to calculate links. - leaderDir = new File(testDir.toString(), - "leader"); + leaderDir = new File(testDir, "leader"); assertTrue(leaderDir.mkdirs()); String pathSnap1 = OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + "snap1"; String pathSnap2 = OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + "snap2"; @@ -245,8 +243,7 @@ private void setupData() throws IOException { Files.write(Paths.get(leaderSnapDir2.toString(), "nonSstFile"), dummyData); // Also create the follower files. - candidateDir = new File(testDir.toString(), - CANDIDATE_DIR_NAME); + candidateDir = new File(testDir, CANDIDATE_DIR_NAME); File followerSnapDir1 = new File(candidateDir.toString(), pathSnap1); followerSnapDir2 = new File(candidateDir.toString(), pathSnap2); copyDirectory(leaderDir.toPath(), candidateDir.toPath()); @@ -359,9 +356,9 @@ public void testExcludeUtilities() throws IOException { * This test always passes in a null dest dir. */ @Test - public void testProcessFileWithNullDestDirParameter() throws IOException { - assertTrue(new File(testDir.toString(), "snap1").mkdirs()); - assertTrue(new File(testDir.toString(), "snap2").mkdirs()); + void testProcessFileWithNullDestDirParameter(@TempDir File testDir) throws IOException { + assertTrue(new File(testDir, "snap1").mkdirs()); + assertTrue(new File(testDir, "snap2").mkdirs()); Path copyFile = Paths.get(testDir.toString(), "snap1/copyfile.sst"); Files.write(copyFile, @@ -450,10 +447,10 @@ public void testProcessFileWithNullDestDirParameter() throws IOException { * This test always passes in a non-null dest dir. 
*/ @Test - public void testProcessFileWithDestDirParameter() throws IOException { - assertTrue(new File(testDir.toString(), "snap1").mkdirs()); - assertTrue(new File(testDir.toString(), "snap2").mkdirs()); - assertTrue(new File(testDir.toString(), "snap3").mkdirs()); + void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOException { + assertTrue(new File(testDir, "snap1").mkdirs()); + assertTrue(new File(testDir, "snap2").mkdirs()); + assertTrue(new File(testDir, "snap3").mkdirs()); Path destDir = Paths.get(testDir.toString(), "destDir"); assertTrue(new File(destDir.toString()).mkdirs()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java index 7d014a16e46..e1ada8c974d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java @@ -20,6 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; + import java.io.File; import java.net.InetSocketAddress; import java.net.URL; @@ -27,7 +28,6 @@ import java.util.Arrays; import java.util.Collection; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.BaseHttpServer; import org.apache.hadoop.hdfs.web.URLConnectionFactory; @@ -36,11 +36,11 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -48,8 +48,6 @@ * Test http server of OM with various HTTP option. 
*/ public class TestOzoneManagerHttpServer { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName()); private static String keystoresDir; private static String sslConfDir; private static OzoneConfiguration conf; @@ -63,17 +61,15 @@ public static Collection policy() { return Arrays.asList(params); } - @BeforeAll public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); + @BeforeAll public static void setUp(@TempDir File baseDir) throws Exception { // Create metadata directory - ozoneMetadataDirectory = new File(BASEDIR, "metadata"); + ozoneMetadataDirectory = new File(baseDir.getPath(), "metadata"); ozoneMetadataDirectory.mkdirs(); // Initialize the OzoneConfiguration conf = new OzoneConfiguration(); - keystoresDir = new File(BASEDIR).getAbsolutePath(); + keystoresDir = baseDir.getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir( TestOzoneManagerHttpServer.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); @@ -95,7 +91,6 @@ public static Collection policy() { @AfterAll public static void tearDown() throws Exception { connectionFactory.destroy(); - FileUtil.fullyDelete(new File(BASEDIR)); KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestScmClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestScmClient.java index cf3f422e7e7..228f668d01a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestScmClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestScmClient.java @@ -47,7 +47,7 @@ import static org.apache.hadoop.hdds.client.ReplicationConfig.fromTypeAndFactor; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java index edc228a5d31..a8b026af05b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -87,7 +86,7 @@ public static void setup() { } @BeforeEach - public void init() throws AuthenticationException, IOException { + void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, folder.getAbsolutePath()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java index 
9cdd965068d..b7c83956085 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java @@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -66,7 +65,7 @@ public class TestTrashService { private String bucketName; @BeforeEach - public void setup() throws IOException, AuthenticationException { + void setup() throws Exception { ExitUtils.disableSystemExit(); OzoneConfiguration configuration = new OzoneConfiguration(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java index 966ef4e9850..f72a687d6a3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java @@ -47,8 +47,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * To test MultiTenantAccessController with Ranger Client. @@ -166,12 +166,7 @@ public void testCreateGetDeletePolicies() throws Exception { assertEquals(prevPolicyVersion + 2L, currPolicyVersion); // get to check it is deleted. - try { - controller.getPolicy(policyName); - fail("Expected exception for missing policy."); - } catch (Exception ex) { - // Expected since policy is not there. - } + assertThrows(Exception.class, () -> controller.getPolicy(policyName)); } @Test @@ -194,12 +189,7 @@ public void testCreateDuplicatePolicy() throws Exception { .setName(policyName) .addVolume(volumeName + "2") .build(); - try { - controller.createPolicy(sameNamePolicy); - fail("Expected exception for duplicate policy."); - } catch (Exception ex) { - // Expected since a policy with the same name should not be allowed. - } + assertThrows(Exception.class, () -> controller.createPolicy(sameNamePolicy)); // Create a policy with different name but same resource. // Check for error. @@ -208,12 +198,7 @@ public void testCreateDuplicatePolicy() throws Exception { .setName(policyName + "2") .addVolume(volumeName) .build(); - try { - controller.createPolicy(sameResourcePolicy); - fail("Expected exception for duplicate policy."); - } catch (Exception ex) { - // Expected since a policy with the same resource should not be allowed. - } + assertThrows(Exception.class, () -> controller.createPolicy(sameResourcePolicy)); // delete policy. controller.deletePolicy(policyName); @@ -369,12 +354,7 @@ public void testCreateGetDeleteRoles() throws Exception { // delete role. controller.deleteRole(roleName); // get to check it is deleted. 
- try { - controller.getRole(roleName); - fail("Expected exception for missing role."); - } catch (Exception ex) { - // Expected since policy is not there. - } + assertThrows(Exception.class, () -> controller.getRole(roleName)); } @Test @@ -393,13 +373,7 @@ public void testCreateDuplicateRole() throws Exception { .setName(roleName) .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION) .build(); - try { - controller.createRole(sameNameRole); - fail("Expected exception for duplicate role."); - } catch (Exception ex) { - // Expected since a policy with the same name should not be allowed. - } - + assertThrows(Exception.class, () -> controller.createRole(sameNameRole)); // delete role. controller.deleteRole(roleName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 202234a0d43..1890958cbaa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -67,7 +67,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; @@ -81,16 +81,13 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer doubleBuffer; private OzoneManager ozoneManager; - private OmMetadataManagerImpl omMetadataManager; private S3SecretLockedManager secretManager; - private CreateSnapshotResponse snapshotResponse1 = - mock(CreateSnapshotResponse.class); - private CreateSnapshotResponse snapshotResponse2 = - mock(CreateSnapshotResponse.class); - private OMResponse omKeyResponse = mock(OMResponse.class); - private OMResponse omBucketResponse = mock(OMResponse.class); - private OMResponse omSnapshotResponse1 = mock(OMResponse.class); - private OMResponse omSnapshotResponse2 = mock(OMResponse.class); + private final CreateSnapshotResponse snapshotResponse1 = mock(CreateSnapshotResponse.class); + private final CreateSnapshotResponse snapshotResponse2 = mock(CreateSnapshotResponse.class); + private final OMResponse omKeyResponse = mock(OMResponse.class); + private final OMResponse omBucketResponse = mock(OMResponse.class); + private final OMResponse omSnapshotResponse1 = mock(OMResponse.class); + private final OMResponse omSnapshotResponse2 = mock(OMResponse.class); private static OMClientResponse omKeyCreateResponse = mock(OMKeyCreateResponse.class); private static OMClientResponse omBucketCreateResponse = @@ -104,10 +101,6 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer.FlushNotifier flushNotifier; private OzoneManagerDoubleBuffer.FlushNotifier spyFlushNotifier; - private static String userPrincipalId1 = "alice@EXAMPLE.COM"; - private static String userPrincipalId2 = "messi@EXAMPLE.COM"; - private static String userPrincipalId3 = "ronaldo@EXAMPLE.COM"; - @BeforeEach public void setup() throws IOException { OMMetrics omMetrics = OMMetrics.create(); @@ -117,8 +110,8 @@ public void setup() throws IOException { ozoneManager = mock(OzoneManager.class); 
when(ozoneManager.getMetrics()).thenReturn(omMetrics); - omMetadataManager = - new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + + final OmMetadataManagerImpl omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); AuditLogger auditLogger = mock(AuditLogger.class); @@ -138,10 +131,10 @@ public void setup() throws IOException { flushNotifier = new OzoneManagerDoubleBuffer.FlushNotifier(); spyFlushNotifier = spy(flushNotifier); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) .setS3SecretManager(secretManager) - .setmaxUnFlushedTransactionCount(1000) + .setMaxUnFlushedTransactionCount(1000) .enableRatis(true) .setFlushNotifier(spyFlushNotifier) .build(); @@ -244,12 +237,10 @@ public void testOzoneManagerDoubleBuffer( // Flush the current buffer. doubleBuffer.flushCurrentBuffer(); - assertEquals(expectedFlushCounts, doubleBuffer.getFlushIterations()); - assertEquals(expectedFlushedTransactionCount, - doubleBuffer.getFlushedTransactionCount()); + assertEquals(expectedFlushCounts, doubleBuffer.getFlushIterationsForTesting()); + assertEquals(expectedFlushedTransactionCount, doubleBuffer.getFlushedTransactionCountForTesting()); - OzoneManagerDoubleBufferMetrics bufferMetrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics bufferMetrics = doubleBuffer.getMetrics(); assertEquals(expectedFlushCountsInMetric, bufferMetrics.getTotalNumOfFlushOperations()); @@ -259,6 +250,9 @@ public void testOzoneManagerDoubleBuffer( bufferMetrics.getMaxNumberOfTransactionsFlushedInOneIteration()); assertEquals(expectedAvgFlushTransactionsInMetric, bufferMetrics.getAvgFlushTransactionsInOneIteration(), 0.001); + + // reset max + bufferMetrics.setMaxNumberOfTransactionsFlushedInOneIteration(0); } @Test @@ -295,7 +289,7 @@ public void testAwaitFlush() throws Exception { doubleBuffer.getCurrentBufferSize()); // Start double buffer and wait for flush. - final Future await = awaitFlush(); + final Future await = doubleBuffer.awaitFlushAsync(); Future flusher = flushTransactions(executorService); await.get(); @@ -308,7 +302,7 @@ public void testAwaitFlush() throws Exception { assertEquals(0, doubleBuffer.getReadyBufferSize()); // Run again to make sure it works when double buffer is empty - awaitFlush().get(); + doubleBuffer.awaitFlushAsync().get(); // Clean up. flusher.cancel(false); @@ -318,6 +312,10 @@ public void testAwaitFlush() throws Exception { @Test public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { + final String userPrincipalId1 = "alice@EXAMPLE.COM"; + final String userPrincipalId2 = "messi@EXAMPLE.COM"; + final String userPrincipalId3 = "ronaldo@EXAMPLE.COM"; + // Create a secret for "alice". // This effectively makes alice an S3 admin. 
KerberosName.setRuleMechanism(DEFAULT_MECHANISM); @@ -325,8 +323,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { "RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" + "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" + "DEFAULT"); - UserGroupInformation ugiAlice; - ugiAlice = UserGroupInformation.createRemoteUser(userPrincipalId1); + final UserGroupInformation ugiAlice = UserGroupInformation.createRemoteUser(userPrincipalId1); UserGroupInformation.createRemoteUser(userPrincipalId2); UserGroupInformation.createRemoteUser(userPrincipalId3); assertEquals("alice", ugiAlice.getShortUserName()); @@ -338,9 +335,9 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { doubleBuffer.stopDaemon(); // Create 3 secrets and store them in the cache and double buffer. - processSuccessSecretRequest(userPrincipalId1, 1, true); - processSuccessSecretRequest(userPrincipalId2, 2, true); - processSuccessSecretRequest(userPrincipalId3, 3, true); + processSuccessSecretRequest(userPrincipalId1, 1); + processSuccessSecretRequest(userPrincipalId2, 2); + processSuccessSecretRequest(userPrincipalId3, 3); S3SecretCache cache = secretManager.cache(); // Check if all the three secrets are cached. @@ -357,8 +354,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { assertNull(cache.get(userPrincipalId1)); } finally { // cleanup metrics - OzoneManagerDoubleBufferMetrics metrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics metrics = doubleBuffer.getMetrics(); metrics.setMaxNumberOfTransactionsFlushedInOneIteration(0); metrics.setAvgFlushTransactionsInOneIteration(0); metrics.incrTotalSizeOfFlushedTransactions( @@ -368,10 +364,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { } } - private void processSuccessSecretRequest( - String userPrincipalId, - int txLogIndex, - boolean shouldHaveResponse) throws IOException { + private void processSuccessSecretRequest(String userPrincipalId, int txLogIndex) throws IOException { S3GetSecretRequest s3GetSecretRequest = new S3GetSecretRequest( new S3GetSecretRequest( @@ -399,11 +392,6 @@ private OzoneManagerProtocolProtos.OMRequest s3GetSecretRequest( ).build(); } - // Return a future that waits for the flush. 
- private Future awaitFlush() { - return doubleBuffer.awaitFlushAsync(); - } - private Future flushTransactions(ExecutorService executorService) { return executorService.submit(() -> { doubleBuffer.resume(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 635f86f3ab5..dd8e642721e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -75,9 +75,9 @@ public void setup() throws IOException { folder.toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(configuration, null); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) - .setmaxUnFlushedTransactionCount(10000) + .setMaxUnFlushedTransactionCount(10000) .enableRatis(true) .build(); } @@ -96,8 +96,7 @@ public void stop() { public void testDoubleBufferWithDummyResponse() throws Exception { String volumeName = UUID.randomUUID().toString(); int bucketCount = 100; - OzoneManagerDoubleBufferMetrics metrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics metrics = doubleBuffer.getMetrics(); // As we have not flushed/added any transactions, all metrics should have // value zero. @@ -113,11 +112,11 @@ public void testDoubleBufferWithDummyResponse() throws Exception { 100, 60000); assertThat(metrics.getTotalNumOfFlushOperations()).isGreaterThan(0); - assertEquals(bucketCount, doubleBuffer.getFlushedTransactionCount()); + assertEquals(bucketCount, doubleBuffer.getFlushedTransactionCountForTesting()); assertThat(metrics.getMaxNumberOfTransactionsFlushedInOneIteration()).isGreaterThan(0); assertEquals(bucketCount, omMetadataManager.countRowsInTable( omMetadataManager.getBucketTable())); - assertThat(doubleBuffer.getFlushIterations()).isGreaterThan(0); + assertThat(doubleBuffer.getFlushIterationsForTesting()).isGreaterThan(0); assertThat(metrics.getFlushTime().lastStat().numSamples()).isGreaterThan(0); assertThat(metrics.getAvgFlushTransactionsInOneIteration()).isGreaterThan(0); assertEquals(bucketCount, (long) metrics.getQueueSize().lastStat().total()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 7178868dcf3..a97b24289cd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -68,7 +68,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -107,9 +107,9 @@ public void setup() throws IOException { 
auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) - .setmaxUnFlushedTransactionCount(100000) + .setMaxUnFlushedTransactionCount(100000) .enableRatis(true) .build(); } @@ -173,9 +173,9 @@ public void testDoubleBufferWithMixOfTransactions() throws Exception { final int deleteCount = 5; // We are doing +1 for volume transaction. - GenericTestUtils.waitFor(() -> - doubleBuffer.getFlushedTransactionCount() == - (bucketCount + deleteCount + 1), 100, 120000); + GenericTestUtils.waitFor( + () -> doubleBuffer.getFlushedTransactionCountForTesting() == bucketCount + deleteCount + 1, + 100, 120000); assertEquals(1, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); @@ -251,8 +251,9 @@ public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { final int deleteCount = 10; // We are doing +1 for volume transaction. - GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() - == (bucketCount + deleteCount + 2), 100, 120000); + GenericTestUtils.waitFor( + () -> doubleBuffer.getFlushedTransactionCountForTesting() == bucketCount + deleteCount + 2, + 100, 120000); assertEquals(2, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); @@ -398,8 +399,8 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) int expectedBuckets = bucketsPerVolume * volumeCount; long expectedTransactions = volumeCount + expectedBuckets; - GenericTestUtils.waitFor(() -> - expectedTransactions == doubleBuffer.getFlushedTransactionCount(), + GenericTestUtils.waitFor( + () -> expectedTransactions == doubleBuffer.getFlushedTransactionCountForTesting(), 100, volumeCount * 500); GenericTestUtils.waitFor(() -> @@ -411,7 +412,7 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) assertRowCount(expectedBuckets, omMetadataManager.getBucketTable()), 300, volumeCount * 300); - assertThat(doubleBuffer.getFlushIterations()).isGreaterThan(0); + assertThat(doubleBuffer.getFlushIterationsForTesting()).isGreaterThan(0); } private boolean assertRowCount(int expected, Table table) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index c2f2feae62f..1e4787df427 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -22,7 +22,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.Collections; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -91,11 +90,9 @@ public static void setup() { } @BeforeEach - public void init() throws Exception { + public void init(@TempDir Path metaDirPath) throws Exception { conf = new OzoneConfiguration(); omID = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omID); - Path metaDirPath = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, 
RATIS_RPC_TIMEOUT, TimeUnit.MILLISECONDS); @@ -215,13 +212,11 @@ public void verifyRaftGroupIdGenerationWithDefaultOmServiceId() throws } @Test - public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws + public void verifyRaftGroupIdGenerationWithCustomOmServiceId(@TempDir Path metaDirPath) throws Exception { String customOmServiceId = "omSIdCustom123"; OzoneConfiguration newConf = new OzoneConfiguration(); String newOmId = UUID.randomUUID().toString(); - String path = GenericTestUtils.getTempPath(newOmId); - Path metaDirPath = Paths.get(path, "om-meta"); newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); newConf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, RATIS_RPC_TIMEOUT, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index 3ea7c512648..93997826bf3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -49,8 +49,8 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -180,19 +180,15 @@ public void testPreAppendTransaction() throws Exception { prepareState.getState().getStatus()); // Submitting a write request should now fail. - try { - ozoneManagerStateMachine.preAppendTransaction( - mockTransactionContext(createKeyRequest)); - fail("Expected StateMachineException to be thrown when " + - "submitting write request while prepared."); - } catch (StateMachineException smEx) { - assertFalse(smEx.leaderShouldStepDown()); - - Throwable cause = smEx.getCause(); - assertInstanceOf(OMException.class, cause); - assertEquals(((OMException) cause).getResult(), - OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED); - } + StateMachineException smEx = + assertThrows(StateMachineException.class, + () -> ozoneManagerStateMachine.preAppendTransaction(mockTransactionContext(createKeyRequest)), + "Expected StateMachineException to be thrown when submitting write request while prepared."); + assertFalse(smEx.leaderShouldStepDown()); + + Throwable cause = smEx.getCause(); + OMException omException = assertInstanceOf(OMException.class, cause); + assertEquals(omException.getResult(), OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED); // Should be able to prepare again without issue. 
submittedTrx = mockTransactionContext(prepareRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis_snapshot/TestOmRatisSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis_snapshot/TestOmRatisSnapshotProvider.java index ce3f4d3b4dd..42a17cd07b6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis_snapshot/TestOmRatisSnapshotProvider.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis_snapshot/TestOmRatisSnapshotProvider.java @@ -42,8 +42,8 @@ import static java.net.HttpURLConnection.HTTP_OK; import static org.apache.hadoop.ozone.OzoneConsts.MULTIPART_FORM_DATA_BOUNDARY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 5b660fefc09..163aefc7d3a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -100,7 +101,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.when; @@ -1623,4 +1624,54 @@ public static OMRequest createRequestWithS3Credentials(String accessId, .build(); } + /** + * Add key entry to PrefixTable. + * @throws Exception + */ + public static void addPrefixToTable(String volumeName, String bucketName, String prefixName, long trxnLogIndex, + OMMetadataManager omMetadataManager) throws Exception { + + OmPrefixInfo omPrefixInfo = createOmPrefixInfo(volumeName, bucketName, + prefixName, trxnLogIndex); + + addPrefixToTable(false, omPrefixInfo, trxnLogIndex, + omMetadataManager); + } + + /** + * Add key entry to PrefixTable. + * @throws Exception + */ + public static void addPrefixToTable(boolean addToCache, OmPrefixInfo omPrefixInfo, long trxnLogIndex, + OMMetadataManager omMetadataManager) throws Exception { + String prefixName = omPrefixInfo.getName(); + + if (addToCache) { + omMetadataManager.getPrefixTable() + .addCacheEntry(new CacheKey<>(omPrefixInfo.getName()), + CacheValue.get(trxnLogIndex, omPrefixInfo)); + } + omMetadataManager.getPrefixTable().put(prefixName, omPrefixInfo); + } + + /** + * Create OmPrefixInfo. 
+ */ + public static OmPrefixInfo createOmPrefixInfo(String volumeName, String bucketName, String prefixName, + long trxnLogIndex) { + OzoneObjInfo prefixObj = OzoneObjInfo.Builder + .newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setPrefixName(prefixName) + .setResType(ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + return OmPrefixInfo.newBuilder() + .setName(prefixObj.getPath()) + .setObjectID(System.currentTimeMillis()) + .setUpdateID(trxnLogIndex) + .build(); + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java index d6913cb234d..bb3e3930059 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java @@ -35,6 +35,7 @@ import static org.apache.hadoop.ozone.om.request.BucketLayoutAwareOMKeyRequestFactory.OM_KEY_REQUEST_CLASSES; import static org.apache.hadoop.ozone.om.request.BucketLayoutAwareOMKeyRequestFactory.addRequestClass; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; /** @@ -135,21 +136,17 @@ public void testAddInvalidRequestClass() addRequestClass(Type.PurgeDirectories, OMDirectoriesPurgeRequestWithFSO.class, BucketLayout.FILE_SYSTEM_OPTIMIZED); - try { - // This should fail, since this class does not have a valid constructor - - // one that takes an OMRequest and a BucketLayout as parameters. - getRequestInstanceFromMap( - OMRequest.newBuilder() - .setCmdType(Type.PurgeKeys) - .setClientId("xyz") - .build(), - getKey(Type.PurgeDirectories, BucketLayout.FILE_SYSTEM_OPTIMIZED), - BucketLayout.FILE_SYSTEM_OPTIMIZED); - fail("No exception thrown for invalid OMKeyRequest class"); - } catch (NoSuchMethodException ex) { - // expected exception. - LOG.info("Expected exception thrown for invalid OMKeyRequest class", ex); - } + // This should fail, since this class does not have a valid constructor - + // one that takes an OMRequest and a BucketLayout as parameters. 
+ assertThrows(NoSuchMethodException.class, + () -> getRequestInstanceFromMap( + OMRequest.newBuilder() + .setCmdType(Type.PurgeKeys) + .setClientId("xyz") + .build(), + getKey(Type.PurgeDirectories, BucketLayout.FILE_SYSTEM_OPTIMIZED), + BucketLayout.FILE_SYSTEM_OPTIMIZED), + "No exception thrown for invalid OMKeyRequest class"); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java index ae421470af8..1b38076b1e6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java @@ -25,7 +25,7 @@ import static org.apache.hadoop.ozone.om.request.OMClientRequest.validateAndNormalizeKey; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Class to test normalize paths. @@ -75,12 +75,11 @@ public void testNormalizeKeyInvalidPaths() throws OMException { } private void checkInvalidPath(String keyName) { - try { - validateAndNormalizeKey(true, keyName); - fail("checkInvalidPath failed for path " + keyName); - } catch (OMException ex) { - assertThat(ex.getMessage()).contains("Invalid KeyPath"); - } + OMException ex = + assertThrows(OMException.class, + () -> validateAndNormalizeKey(true, keyName), + "checkInvalidPath failed for path " + keyName); + assertThat(ex.getMessage()).contains("Invalid KeyPath"); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java index ea7cfdf844c..a7cdd2d6c53 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java @@ -39,7 +39,7 @@ import java.nio.file.Path; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 17c9f6bfe5f..275e8a6f2aa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -67,8 +67,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static 
org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 82cb3c0d9bd..0eceb2246ee 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -46,7 +46,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -66,8 +66,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; @@ -736,7 +736,7 @@ private void verifyDirectoriesInheritAcls(List dirs, } } - @NotNull + @Nonnull private String createDirKey(List dirs, int depth) { String keyName = RandomStringUtils.randomAlphabetic(5); dirs.add(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index c7ed3922262..b39068fd734 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -570,7 +570,7 @@ private void verifyKeyNameInCreateFileResponse(String key, * @param replicationType * @return OMRequest */ - @NotNull + @Nonnull protected OMRequest createFileRequest( String volumeName, String bucketName, String keyName, HddsProtos.ReplicationFactor replicationFactor, @@ -600,7 +600,7 @@ protected OMRequest createFileRequest( * @param omRequest om request * @return OMFileCreateRequest reference */ - @NotNull + @Nonnull protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { return new OMFileCreateRequest(omRequest, getBucketLayout()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java index 
991ff9e4f9f..5757beeb282 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java @@ -46,7 +46,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -62,7 +62,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; - /** * Tests OMRecoverLeaseRequest. */ @@ -436,7 +435,7 @@ protected OMRequest createAllocateBlockRequest(String volumeName, String bucketN .setAllocateBlockRequest(allocateBlockRequest).build(); } - @NotNull + @Nonnull protected OMRequest createRecoverLeaseRequest( String volumeName, String bucketName, String keyName, boolean force) { RecoverLeaseRequest.Builder rb = RecoverLeaseRequest.newBuilder(); @@ -460,7 +459,7 @@ private OMClientResponse validateAndUpdateCache() throws Exception { return omClientResponse; } - @NotNull + @Nonnull protected OMRequest createKeyCommitRequest(KeyArgs keyArgs, boolean newClientID, boolean recovery) { CommitKeyRequest.Builder rb = CommitKeyRequest.newBuilder().setKeyArgs(keyArgs).setRecovery(recovery); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index 4ba406f6595..eb99cd93256 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -118,7 +118,7 @@ public void testValidateAndUpdateCache() throws Exception { } - @NotNull + @Nonnull protected OMAllocateBlockRequest getOmAllocateBlockRequest( OMRequest modifiedOmRequest) { return new OMAllocateBlockRequest(modifiedOmRequest, BucketLayout.DEFAULT); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 50da570f8c7..33512d355c0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; /** * Tests 
OMAllocateBlockRequest class prefix layout. @@ -40,7 +40,7 @@ public class TestOMAllocateBlockRequestWithFSO extends TestOMAllocateBlockRequest { - @NotNull + @Nonnull @Override protected OzoneConfiguration getOzoneConfiguration() { OzoneConfiguration config = super.getOzoneConfiguration(); @@ -81,7 +81,7 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) parentID, fileName); } - @NotNull + @Nonnull @Override protected OMAllocateBlockRequest getOmAllocateBlockRequest( OzoneManagerProtocolProtos.OMRequest modifiedOmRequest) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index a4176b147b8..f040bd50817 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -40,7 +40,7 @@ import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; /** @@ -273,7 +273,7 @@ private void performBatchOperationCommit( } } - @NotNull + @Nonnull private List validateDeletedKeysTable( List deletedKeyInfos) throws IOException { List deletedKeyNames = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index abc5fcbb495..0f77194c88c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -717,13 +717,13 @@ protected String getParentDir() { return parentDir; } - @NotNull + @Nonnull protected String getOzonePathKey() throws IOException { return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } - @NotNull + @Nonnull protected String addKeyToOpenKeyTable(List locationList) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, @@ -734,7 +734,7 @@ protected String addKeyToOpenKeyTable(List locationList) keyName, clientID); } - @NotNull + @Nonnull protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) { return new OMKeyCommitRequest(omRequest, BucketLayout.DEFAULT); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index 7155a1564aa..d258c1cfde4 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -90,7 +90,7 @@ protected String addKeyToOpenKeyTable(List locationList) } - @NotNull + @Nonnull protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) { return new OMKeyCommitRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index b765e902811..12d9d02a72d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -69,14 +69,12 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.when; /** @@ -802,18 +800,13 @@ protected void addToKeyTable(String keyName) throws Exception { private void checkNotAValidPath(String keyName) { OMRequest omRequest = createKeyRequest(false, 0, keyName); OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest); - - try { - omKeyCreateRequest.preExecute(ozoneManager); - fail("checkNotAValidPath failed for path" + keyName); - } catch (IOException ex) { - OMException omException = assertInstanceOf(OMException.class, ex); - assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, - omException.getResult()); - } - - + OMException ex = + assertThrows(OMException.class, () -> omKeyCreateRequest.preExecute(ozoneManager), + "checkNotAValidPath failed for path" + keyName); + assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, + ex.getResult()); } + private void checkNotAFile(String keyName) throws Exception { OMRequest omRequest = createKeyRequest(false, 0, keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index cb585caefd9..00d1883d749 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -28,7 +28,6 @@ import 
org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -52,7 +51,7 @@ public void testPreExecute(String testKeyName) throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(testKeyName); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); - Assertions.assertNotNull(omKeyInfo); + assertNotNull(omKeyInfo); doPreExecute(createDeleteKeyRequest(testKeyName)); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 96483fb587c..9dafab09029 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -21,8 +21,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; + import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.OzonePrefixPathImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -112,7 +112,7 @@ public void testPreExecute(String testKeyName) throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); - Assertions.assertNotNull(omKeyInfo); + assertNotNull(omKeyInfo); doPreExecute(createDeleteKeyRequest()); } @@ -142,19 +142,14 @@ public void testOzonePrefixPathViewer() throws Exception { verifyPath(ozonePrefixPath, "c/d", "c/d/e"); verifyPath(ozonePrefixPath, "c/d/e", "c/d/e/file1"); - try { - ozonePrefixPath.getChildren("c/d/e/file1"); - fail("Should throw INVALID_KEY_NAME as the given " + - "path is a file."); - } catch (OMException ome) { - assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, - ome.getResult()); - } + OMException ome = assertThrows(OMException.class, () -> ozonePrefixPath.getChildren("c/d/e/file1"), + "Should throw INVALID_KEY_NAME as the given path is a file."); + assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, ome.getResult()); // OzonePrefixPathImpl on a file - ozonePrefixPath = new OzonePrefixPathImpl(volumeName, + OzonePrefixPathImpl ozonePrefixPathFile1 = new OzonePrefixPathImpl(volumeName, bucketName, "c/d/e/file1", keyManager); - status = ozonePrefixPath.getOzoneFileStatus(); + status = 
ozonePrefixPathFile1.getOzoneFileStatus(); assertNotNull(status); assertEquals("c/d/e/file1", status.getTrimmedName()); assertEquals("c/d/e/file1", status.getKeyInfo().getKeyName()); @@ -168,12 +163,7 @@ private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName, pathName); assertTrue(pathItr.hasNext(), "Failed to list keyPaths"); assertEquals(expectedPath, pathItr.next().getTrimmedName()); - try { - pathItr.next(); - fail("Reached end of the list!"); - } catch (NoSuchElementException nse) { - // expected - } + assertThrows(NoSuchElementException.class, () -> pathItr.next(), "Reached end of the list!"); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index 46cf4b0ef29..a1d616c0756 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -49,7 +49,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.when; /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 2f28c54516e..4dfb3c67c96 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; @@ -79,10 +79,10 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; @@ -247,7 +247,7 @@ public void setup() throws Exception { GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); } - @NotNull + @Nonnull protected OzoneConfiguration getOzoneConfiguration() { return new OzoneConfiguration(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 
9cc45dfac57..9c5a9257245 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -17,13 +17,18 @@ */ package org.apache.hadoop.ozone.om.request.key; +import java.util.Collections; +import java.util.List; import java.util.UUID; +import java.util.stream.Collectors; + import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest; +import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest; @@ -32,7 +37,10 @@ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.junit.jupiter.api.Test; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.when; /** @@ -41,87 +49,251 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test - public void testAclRequest() throws Exception { - PrefixManager prefixManager = new PrefixManagerImpl( + public void testAddAclRequest() throws Exception { + PrefixManagerImpl prefixManager = new PrefixManagerImpl( ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); + String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; + OzoneObj prefixObj = createPrefixObj(prefixName); // Manually add volume, bucket and key to DB OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); + OMRequestTestUtils.addPrefixToTable(volumeName, bucketName, prefixName, + 1L, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); // Create KeyAddAcl request - OMRequest originalRequest = createAddAclkeyRequest(acl); - OMPrefixAddAclRequest omKeyPrefixAclRequest = new OMPrefixAddAclRequest( + OMRequest originalRequest = createAddAclPrefixRequest(prefixName, acl); + OMPrefixAddAclRequest omPrefixAddAclRequest = new OMPrefixAddAclRequest( originalRequest); - omKeyPrefixAclRequest.preExecute(ozoneManager); + omPrefixAddAclRequest.preExecute(ozoneManager); // Execute original request - OMClientResponse omClientResponse = omKeyPrefixAclRequest + OMClientResponse omClientResponse = omPrefixAddAclRequest .validateAndUpdateCache(ozoneManager, 2); assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); + + // Check that it exists in Prefix tree (PrefixManagerImpl) + OmPrefixInfo prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(prefixObj.getPath(), prefixInfo.getName()); + assertEquals(2L, prefixInfo.getUpdateID()); + + List ozoneAcls = 
prefixManager.getAcl(prefixObj); + assertEquals(1, ozoneAcls.size()); + assertEquals(acl, ozoneAcls.get(0)); + + // Check that it exists in Prefix table (cache) + OmPrefixInfo prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(prefixObj.getPath(), prefixInfoFromTable.getName()); + assertEquals(2L, prefixInfoFromTable.getUpdateID()); + + // Adding ACL that already exists + OMClientResponse omClientResponse1 = omPrefixAddAclRequest + .validateAndUpdateCache(ozoneManager, 3); + + // Check that it exists in Prefix tree (PrefixManagerImpl) + prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(prefixObj.getPath(), prefixInfo.getName()); + assertEquals(3L, prefixInfo.getUpdateID()); + + ozoneAcls = prefixManager.getAcl(prefixObj); + assertEquals(1, ozoneAcls.size()); + assertEquals(acl, ozoneAcls.get(0)); + + // Check that it exists in Prefix table (cache) + prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(prefixObj.getPath(), prefixInfoFromTable.getName()); + assertEquals(3L, prefixInfoFromTable.getUpdateID()); + + assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse1.getOMResponse().getStatus()); + } + + @Test + public void testValidationFailure() { + PrefixManagerImpl prefixManager = new PrefixManagerImpl( + ozoneManager.getMetadataManager(), true); + when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); + + OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); + + // No trailing slash + OMPrefixAddAclRequest invalidRequest1 = new OMPrefixAddAclRequest( + createAddAclPrefixRequest("dir1", acl) + ); + OMClientResponse response1 = + invalidRequest1.validateAndUpdateCache(ozoneManager, 1); + assertEquals(OzoneManagerProtocolProtos.Status.PREFIX_NOT_FOUND, + response1.getOMResponse().getStatus()); + + // Not a valid FS path + OMPrefixAddAclRequest invalidRequest2 = new OMPrefixAddAclRequest( + createAddAclPrefixRequest("/dir1//dir2/", acl) + ); + OMClientResponse response2 = + invalidRequest2.validateAndUpdateCache(ozoneManager, 2); + assertEquals(OzoneManagerProtocolProtos.Status. 
+ INVALID_PATH_IN_ACL_REQUEST, response2.getOMResponse().getStatus()); } @Test public void testRemoveAclRequest() throws Exception { - PrefixManager prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl( ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); + String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; + OzoneObj prefixObj = createPrefixObj(prefixName); // Manually add volume, bucket and key to DB OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, - omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:mohanad.elsafty:rwdlncxy[ACCESS]"); - // Create KeyAddAcl request - OMRequest originalRequest = createAddAclkeyRequest(acl); - OMPrefixAddAclRequest omKeyPrefixAclRequest = new OMPrefixAddAclRequest( + // Create an initial prefix ACL + OMRequest originalRequest = createAddAclPrefixRequest(prefixName, acl); + OMPrefixAddAclRequest omPrefixAddAclRequest = new OMPrefixAddAclRequest( originalRequest); - omKeyPrefixAclRequest.preExecute(ozoneManager); - omKeyPrefixAclRequest.validateAndUpdateCache(ozoneManager, 2); + omPrefixAddAclRequest.preExecute(ozoneManager); + OMClientResponse createResponse = omPrefixAddAclRequest.validateAndUpdateCache(ozoneManager, 1L); + assertEquals(OzoneManagerProtocolProtos.Status.OK, createResponse.getOMResponse().getStatus()); + + // Check update ID + OmPrefixInfo prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(1L, prefixInfo.getUpdateID()); + OmPrefixInfo prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(1L, prefixInfoFromTable.getUpdateID()); + + // Remove acl that does not exist + OzoneAcl notExistAcl = OzoneAcl.parseAcl("user:nonexist:r[ACCESS]"); + OMRequest notExistRemoveAclRequest = createRemoveAclPrefixRequest(prefixName, notExistAcl); + OMPrefixRemoveAclRequest omPrefixRemoveAclRequest = + new OMPrefixRemoveAclRequest(notExistRemoveAclRequest); + omPrefixRemoveAclRequest.preExecute(ozoneManager); + OMClientResponse omClientResponse = omPrefixRemoveAclRequest + .validateAndUpdateCache(ozoneManager, 2L); + assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + // Check that the update ID is updated + prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(2L, prefixInfo.getUpdateID()); + prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(2L, prefixInfoFromTable.getUpdateID()); // Remove existing prefix acl. 
- OMRequest validRemoveAclRequest = createRemoveAclKeyRequest(acl, keyName); + OMRequest validRemoveAclRequest = createRemoveAclPrefixRequest(prefixName, acl); OMPrefixRemoveAclRequest omPrefixRemoveAclRequest1 = new OMPrefixRemoveAclRequest(validRemoveAclRequest); omPrefixRemoveAclRequest1.preExecute(ozoneManager); OMClientResponse omClientResponse1 = omPrefixRemoveAclRequest1 - .validateAndUpdateCache(ozoneManager, 3); + .validateAndUpdateCache(ozoneManager, 3L); assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse1.getOMResponse().getStatus()); + // Check that the entry is deleted in Prefix tree (PrefixManagerImpl) + prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertNull(prefixInfo); + // Non-existent prefix should return empty ACL + List ozoneAcls = prefixManager.getAcl(prefixObj); + assertTrue(ozoneAcls.isEmpty()); + + // Check that it is also deleted in Prefix table (cache) + prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertNull(prefixInfoFromTable); + // Remove non-existing prefix acl. - OMRequest invalidRemoveAclRequest = createRemoveAclKeyRequest(acl, keyName); + OMRequest invalidRemoveAclRequest = createRemoveAclPrefixRequest(prefixName, acl); OMPrefixRemoveAclRequest omPrefixRemoveAclRequest2 = new OMPrefixRemoveAclRequest(invalidRemoveAclRequest); omPrefixRemoveAclRequest1.preExecute(ozoneManager); OMClientResponse omClientResponse2 = omPrefixRemoveAclRequest2 - .validateAndUpdateCache(ozoneManager, 4); + .validateAndUpdateCache(ozoneManager, 4L); assertEquals(OzoneManagerProtocolProtos.Status.PREFIX_NOT_FOUND, omClientResponse2.getOMResponse().getStatus()); } + @Test + public void testSetAclRequest() throws Exception { + PrefixManagerImpl prefixManager = new PrefixManagerImpl( + ozoneManager.getMetadataManager(), true); + when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); + String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; + OzoneObj prefixObj = createPrefixObj(prefixName); + + // Manually add volume, bucket and key to DB + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); + + // Create PrefixSetAcl request + OMRequest originalRequest = createSetAclPrefixRequest(prefixName, + Collections.singletonList(acl)); + OMPrefixSetAclRequest omPrefixSetAclRequest = new OMPrefixSetAclRequest( + originalRequest); + omPrefixSetAclRequest.preExecute(ozoneManager); + + // Execute original request + OMClientResponse omClientResponse = omPrefixSetAclRequest + .validateAndUpdateCache(ozoneManager, 1L); + assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + // Check that it exists in Prefix tree (PrefixManagerImpl) + OmPrefixInfo prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(prefixObj.getPath(), prefixInfo.getName()); + assertEquals(1L, prefixInfo.getUpdateID()); + + List ozoneAcls = prefixManager.getAcl(prefixObj); + assertEquals(1, ozoneAcls.size()); + assertEquals(acl, ozoneAcls.get(0)); + + // Check that it exists in Prefix table (cache) + OmPrefixInfo prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(prefixObj.getPath(), prefixInfoFromTable.getName()); + assertEquals(1L, prefixInfoFromTable.getUpdateID()); + + // Setting ACL that already exists + OMClientResponse omClientResponse1 = omPrefixSetAclRequest + .validateAndUpdateCache(ozoneManager, 2L); + 
assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(prefixObj.getPath(), prefixInfo.getName()); + // Unlike add ACL, set prefix ACL will clear the current ACLs + // and re-add the ACL again, so the update ID is updated every time + assertEquals(2L, prefixInfo.getUpdateID()); + + ozoneAcls = prefixManager.getAcl(prefixObj); + assertEquals(1, ozoneAcls.size()); + assertEquals(acl, ozoneAcls.get(0)); + + // Check that it exists in Prefix table (cache) + prefixInfoFromTable = omMetadataManager.getPrefixTable().get( + prefixObj.getPath()); + assertEquals(prefixObj.getPath(), prefixInfoFromTable.getName()); + assertEquals(2L, prefixInfoFromTable.getUpdateID()); + + assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse1.getOMResponse().getStatus()); + } + /** - * Create OMRequest which encapsulates OMKeyAddAclRequest. + * Create OMRequest which encapsulates OMPrefixAddAclRequest. */ - private OMRequest createAddAclkeyRequest(OzoneAcl acl) { - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(keyName) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); + private OMRequest createAddAclPrefixRequest(String prefix, OzoneAcl acl) { + OzoneObj obj = createPrefixObj(prefix); AddAclRequest addAclRequest = AddAclRequest.newBuilder() .setObj(OzoneObj.toProtobuf(obj)) .setAcl(OzoneAcl.toProtobuf(acl)) @@ -133,15 +305,11 @@ private OMRequest createAddAclkeyRequest(OzoneAcl acl) { .build(); } - private OMRequest createRemoveAclKeyRequest(OzoneAcl acl, String key) { - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(key) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - + /** + * Create OMRequest which encapsulates OMPrefixRemoveAclRequest. + */ + private OMRequest createRemoveAclPrefixRequest(String prefix, OzoneAcl acl) { + OzoneObj obj = createPrefixObj(prefix); OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = OzoneManagerProtocolProtos.RemoveAclRequest.newBuilder() .setObj(OzoneObj.toProtobuf(obj)) @@ -153,4 +321,32 @@ private OMRequest createRemoveAclKeyRequest(OzoneAcl acl, String key) { .setRemoveAclRequest(removeAclRequest) .build(); } + + /** + * Create OMRequest which encapsulates OMPrefixSetAclRequest. 
+ */ + private OMRequest createSetAclPrefixRequest(String prefix, List<OzoneAcl> acls) { + OzoneObj obj = createPrefixObj(prefix); + OzoneManagerProtocolProtos.SetAclRequest setAclRequest = + OzoneManagerProtocolProtos.SetAclRequest.newBuilder() + .setObj(OzoneObj.toProtobuf(obj)) + .addAllAcl(acls.stream().map(OzoneAcl::toProtobuf) + .collect(Collectors.toList())) + .build(); + + return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) + .setCmdType(OzoneManagerProtocolProtos.Type.SetAcl) + .setSetAclRequest(setAclRequest) + .build(); + } + + private OzoneObj createPrefixObj(String prefix) { + return OzoneObjInfo.Builder.newBuilder() + .setBucketName(bucketName) + .setVolumeName(volumeName) + .setPrefixName(prefix) + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index 87bb6adb586..25c908b18a2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -26,7 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; -import com.google.common.base.Optional; + import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -658,7 +658,7 @@ private void removeFromOpenKeyTable(List mpuKeys) for (String mpuOpenKey: mpuOpenKeys) { omMetadataManager.getOpenKeyTable(getBucketLayout()) .addCacheEntry(new CacheKey<>(mpuOpenKey), - new CacheValue<>(Optional.absent(), 100L)); + CacheValue.get(100L)); omMetadataManager.getOpenKeyTable(getBucketLayout()) .delete(mpuOpenKey); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index d6ad7d189fe..c01bb459b8f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -55,7 +55,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java index 04b1524287e..47cc293e280 100644 ---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java @@ -67,10 +67,9 @@ import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Optional; import java.util.UUID; -import com.google.common.base.Optional; - import static org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -79,7 +78,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; @@ -172,9 +171,9 @@ public void setUp() throws Exception { when(omMultiTenantManager.getCacheOp()).thenReturn(cacheOp); when(omMultiTenantManager.getTenantForAccessID(USER_CAROL)) - .thenReturn(Optional.absent()); + .thenReturn(Optional.empty()); when(omMultiTenantManager.getTenantForAccessID(USER_ALICE)) - .thenReturn(Optional.absent()); + .thenReturn(Optional.empty()); when(omMultiTenantManager.getTenantForAccessID(ACCESS_ID_BOB)) .thenReturn(Optional.of(ACCESS_ID_BOB)); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java index e7805f017bf..e24fba879f6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import com.google.common.base.Optional; import java.util.UUID; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -39,15 +38,18 @@ import org.apache.hadoop.security.token.Token; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.io.Text; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -186,10 +188,7 @@ public void testValidateAndUpdateCacheWithNonNullToken() throws Exception { OMClientResponse clientResponse = setValidateAndUpdateCache(); - Optional responseRenewTime = Optional.fromNullable( - omMetadataManager.getDelegationTokenTable().get(identifier)); - assertEquals(Optional.of(renewTime), responseRenewTime); - + assertEquals(renewTime, 
omMetadataManager.getDelegationTokenTable().get(identifier)); assertEquals(Status.OK, clientResponse.getOMResponse().getStatus()); } @@ -204,10 +203,7 @@ public void testValidateAndUpdateCacheWithNullToken() throws Exception { .getGetDelegationTokenResponse().hasResponse(); assertFalse(hasResponse); - Optional responseRenewTime = Optional.fromNullable( - omMetadataManager.getDelegationTokenTable().get(identifier)); - assertEquals(Optional.absent(), responseRenewTime); - + assertNull(omMetadataManager.getDelegationTokenTable().get(identifier)); assertEquals(Status.OK, clientResponse.getOMResponse().getStatus()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 6ecb2020287..45e5b100753 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -62,8 +62,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 968c14d4533..ca737d2bd25 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -61,8 +61,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index a0972ee7c83..a3b0dae4631 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -69,8 +69,8 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static 
org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 09ce3ac3bff..3856a5b62f5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -50,7 +50,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java index 3bcf75fc6d7..fdb332f6604 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java @@ -158,11 +158,8 @@ public void testNoPostValidationsAreRunningForRequestTypeWithoutValidators() public void testPreProcessorExceptionHandling() throws Exception { ValidationContext ctx = of(aFinalizedVersionManager(), metadataManager); RequestValidations validations = loadValidations(ctx); - - try { - validations.validateRequest(aDeleteKeysRequest(olderClientVersion())); - fail("ServiceException was expected but was not thrown."); - } catch (Exception ignored) { } + assertThrows(Exception.class, + () -> validations.validateRequest(aDeleteKeysRequest(olderClientVersion()))); validationListener.assertNumOfEvents(1); validationListener.assertExactListOfValidatorsCalled( @@ -173,12 +170,8 @@ public void testPreProcessorExceptionHandling() throws Exception { public void testPostProcessorExceptionHandling() { ValidationContext ctx = of(aFinalizedVersionManager(), metadataManager); RequestValidations validations = loadValidations(ctx); - - try { - validations.validateResponse( - aDeleteKeysRequest(olderClientVersion()), aDeleteKeysResponse()); - fail("ServiceException was expected but was not thrown."); - } catch (Exception ignored) { } + assertThrows(Exception.class, + () -> validations.validateResponse(aDeleteKeysRequest(olderClientVersion()), aDeleteKeysResponse())); validationListener.assertNumOfEvents(1); validationListener.assertExactListOfValidatorsCalled( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java index 960d5cc28cb..14f1438b78b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java @@ -42,7 +42,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index f8304e35252..c7e2c265b7b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +37,7 @@ */ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { - @NotNull + @Nonnull @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); @@ -48,7 +48,7 @@ protected OmKeyInfo getOmKeyInfo() { omBucketInfo.getObjectID(), 100, Time.now()); } - @NotNull + @Nonnull @Override protected String getOpenKeyName() throws IOException { assertNotNull(omBucketInfo); @@ -59,7 +59,7 @@ protected String getOpenKeyName() throws IOException { omBucketInfo.getObjectID(), keyName, clientID); } - @NotNull + @Nonnull @Override protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, OMResponse response) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index bbc1926abb3..e5a6b0ab14f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -21,7 +21,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -100,7 +100,7 @@ protected String getOpenKey() throws Exception { keyName, clientID); } - @NotNull + @Nonnull protected OMAllocateBlockResponse getOmAllocateBlockResponse( OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo, OMResponse omResponse) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index 7f6609887c3..85e9354ca8c 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; @@ -66,7 +66,7 @@ protected String getOpenKey() throws Exception { parentID, fileName, clientID); } - @NotNull + @Nonnull @Override protected OMAllocateBlockResponse getOmAllocateBlockResponse( OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index c26a07c97e0..25b2f6c1050 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -132,20 +132,20 @@ public void testAddToDBBatchOnOverwrite() throws Exception { } - @NotNull + @Nonnull protected void addKeyToOpenKeyTable() throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); } - @NotNull + @Nonnull protected String getOzoneKey() throws IOException { assertNotNull(omBucketInfo); return omMetadataManager.getOzoneKey(volumeName, omBucketInfo.getBucketName(), keyName); } - @NotNull + @Nonnull protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, OzoneManagerProtocolProtos.OMResponse omResponse, String openKey, String ozoneKey, RepeatedOmKeyInfo deleteKeys, Boolean isHSync, OmKeyInfo newOpenKeyInfo) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index f5838ddc0f5..9c3f8c1143e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; @@ -38,7 +38,7 @@ */ public class TestOMKeyCommitResponseWithFSO extends TestOMKeyCommitResponse { - @NotNull + @Nonnull @Override protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, OzoneManagerProtocolProtos.OMResponse omResponse, String openKey, @@ -58,7 +58,7 @@ protected OMKeyCommitResponse 
getOmKeyCommitResponse(OmKeyInfo omKeyInfo, openKey, omBucketInfo, deleteKeyMap, volumeId, isHSync, newOpenKeyInfo); } - @NotNull + @Nonnull @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); @@ -69,7 +69,7 @@ protected OmKeyInfo getOmKeyInfo() { omBucketInfo.getObjectID(), 100, Time.now()); } - @NotNull + @Nonnull @Override protected void addKeyToOpenKeyTable() throws Exception { assertNotNull(omBucketInfo); @@ -87,7 +87,7 @@ protected void addKeyToOpenKeyTable() throws Exception { fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); } - @NotNull + @Nonnull @Override protected String getOpenKeyName() throws IOException { assertNotNull(omBucketInfo); @@ -98,7 +98,7 @@ protected String getOpenKeyName() throws IOException { omBucketInfo.getObjectID(), keyName, clientID); } - @NotNull + @Nonnull @Override protected String getOzoneKey() throws IOException { assertNotNull(omBucketInfo); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java index a7d9984477a..c380689c92b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java @@ -21,7 +21,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -98,7 +98,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } - @NotNull + @Nonnull protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, OMResponse response) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index a138c8ba695..ee83f367127 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +35,7 @@ */ public class TestOMKeyCreateResponseWithFSO extends TestOMKeyCreateResponse { - @NotNull + @Nonnull @Override protected String getOpenKeyName() throws IOException { assertNotNull(omBucketInfo); @@ -46,7 +46,7 @@ protected String getOpenKeyName() throws IOException { omBucketInfo.getObjectID(), keyName, clientID); } - @NotNull + @Nonnull @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); @@ -57,7 +57,7 @@ protected OmKeyInfo getOmKeyInfo() { omBucketInfo.getObjectID(), 100, Time.now()); } - @NotNull + @Nonnull @Override protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo 
keyInfo, OmBucketInfo bucketInfo, OMResponse response) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index 87495dc8641..1cbf5c6d0b2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; @@ -109,19 +109,19 @@ public void setup() throws Exception { CacheValue.get(1, omBucketInfo)); } - @NotNull + @Nonnull protected String getOpenKeyName() throws IOException { return omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); } - @NotNull + @Nonnull protected OmKeyInfo getOmKeyInfo() { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); } - @NotNull + @Nonnull protected OzoneConfiguration getOzoneConfiguration() { return new OzoneConfiguration(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java new file mode 100644 index 00000000000..b12087785b1 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.key.acl.prefix; + +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.PrefixManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Tests TestOMPrefixAclResponse. + */ +public class TestOMPrefixAclResponse extends TestOMKeyResponse { + + @Test + public void testAddToDBBatch() throws Exception { + final OzoneAcl user1 = new OzoneAcl(USER, "user1", + ACLType.READ_ACL, ACCESS); + final OzoneAcl user2 = new OzoneAcl(USER, "user2", + ACLType.WRITE, ACCESS); + final String prefixName = "/vol/buck/prefix/"; + List acls = Arrays.asList(user1, user2); + + OmPrefixInfo omPrefixInfo = OmPrefixInfo.newBuilder() + .setName(prefixName) + .setAcls(acls) + .setUpdateID(1L) + .setObjectID(ThreadLocalRandom.current().nextLong()) + .build(); + + OzoneManagerProtocolProtos.OMResponse setAclResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder() + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.SetAcl) + .setSetAclResponse( + OzoneManagerProtocolProtos.SetAclResponse.newBuilder().setResponse(true).build()) + .build(); + + OMPrefixAclResponse prefixAclResponse = + new OMPrefixAclResponse(setAclResponse, omPrefixInfo); + prefixAclResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmPrefixInfo persistedPrefixInfo = omMetadataManager.getPrefixTable() + .getSkipCache(prefixName); + assertEquals(omPrefixInfo, persistedPrefixInfo); + + // Verify that in-memory Prefix Tree (Radix Tree) is able to reload from + // DB successfully + PrefixManagerImpl prefixManager = + new PrefixManagerImpl(omMetadataManager, true); + OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() + .setVolumeName("vol") + .setBucketName("buck") + .setPrefixName("prefix/") + .setResType(OzoneObj.ResourceType.PREFIX) + .setStoreType(OzoneObj.StoreType.OZONE) + .build(); + OmPrefixInfo prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(prefixName, prefixInfo.getName()); + assertEquals(1L, prefixInfo.getUpdateID()); + + List ozoneAcls = prefixManager.getAcl(prefixObj); + assertEquals(2, ozoneAcls.size()); + assertEquals(acls, ozoneAcls); + + OzoneManagerProtocolProtos.OMResponse removeAclResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder() + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.RemoveAcl) + .setRemoveAclResponse( + OzoneManagerProtocolProtos.RemoveAclResponse + .newBuilder().setResponse(true).build()) + .build(); + + // Remove user2 ACL + OmPrefixInfo removeOnePrefixInfo = OmPrefixInfo.newBuilder() + .setName(prefixName) + .setAcls(Collections.singletonList(user1)) + .setUpdateID(2L) + .setObjectID(ThreadLocalRandom.current().nextLong()) + .build(); + + prefixAclResponse = + new OMPrefixAclResponse(removeAclResponse, removeOnePrefixInfo); + + prefixAclResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // Reload prefix tree from DB and validate again. + prefixManager = + new PrefixManagerImpl(omMetadataManager, true); + prefixInfo = prefixManager.getPrefixInfo(prefixObj); + assertEquals(2L, prefixInfo.getUpdateID()); + + ozoneAcls = prefixManager.getAcl(prefixObj); + assertEquals(1, ozoneAcls.size()); + assertEquals(Collections.singletonList(user1), ozoneAcls); + + persistedPrefixInfo = omMetadataManager.getPrefixTable() + .getSkipCache(prefixName); + assertEquals(removeOnePrefixInfo, persistedPrefixInfo); + + // Remove all ACL + OmPrefixInfo removeAllPrefixInfo = OmPrefixInfo.newBuilder() + .setName(prefixName) + .setAcls(Collections.emptyList()) + .setUpdateID(3L) + .setObjectID(ThreadLocalRandom.current().nextLong()) + .build(); + + prefixAclResponse = + new OMPrefixAclResponse(removeAclResponse, removeAllPrefixInfo); + + prefixAclResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + assertNull(omMetadataManager.getPrefixTable() + .getSkipCache(prefixName)); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java index 58c3da3eb03..9d8de4bbb20 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java @@ -37,12 +37,10 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. @@ -110,7 +108,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + void testAddToDBBatchNoOp() throws Exception { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) @@ -122,15 +120,10 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeCreateResponse omVolumeCreateResponse = new OMVolumeCreateResponse(omResponse); - try { - omVolumeCreateResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - assertEquals(0, omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable())); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - + omVolumeCreateResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable())); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java index 448061d399b..aa640067ca4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java @@ -37,12 +37,10 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. 
@@ -140,7 +138,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + void testAddToDBBatchNoOp() throws Exception { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) @@ -152,15 +150,10 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = new OMVolumeSetOwnerResponse(omResponse); - try { - omVolumeSetOwnerResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - assertEquals(0, omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable())); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - + omVolumeSetOwnerResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable())); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java index fc4121b3a3f..fbc8e3c944d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java @@ -36,12 +36,10 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. @@ -108,7 +106,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + void testAddToDBBatchNoOp() throws Exception { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) @@ -120,14 +118,10 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeSetQuotaResponse omVolumeSetQuotaResponse = new OMVolumeSetQuotaResponse(omResponse); - try { - omVolumeSetQuotaResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - assertEquals(0, omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable())); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } + omVolumeSetQuotaResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getVolumeTable())); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index cc8dee24bd4..77bf15ed76b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -21,18 +21,18 @@ import java.io.File; import java.io.IOException; -import java.nio.file.Path; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; +import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -53,8 +53,13 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,30 +68,27 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; -import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -99,608 +101,533 @@ * Metadata Manager. 3. Waits for a while for the KeyDeleting Service to pick up * and call into SCM. 4. Confirms that calls have been successful. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestKeyDeletingService { - @TempDir - private Path folder; - private OzoneManagerProtocol writeClient; - private OzoneManager om; +class TestKeyDeletingService extends OzoneTestBase { private static final Logger LOG = LoggerFactory.getLogger(TestKeyDeletingService.class); + private static final AtomicInteger OBJECT_COUNTER = new AtomicInteger(); + + private OzoneConfiguration conf; + private OzoneManagerProtocol writeClient; + private OzoneManager om; + private KeyManager keyManager; + private OMMetadataManager metadataManager; + private KeyDeletingService keyDeletingService; + private ScmBlockLocationTestingClient scmBlockTestingClient; @BeforeAll - public static void setup() { + void setup() { ExitUtils.disableSystemExit(); } - private OzoneConfiguration createConfAndInitValues() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.toFile(); - if (!newFolder.exists()) { - assertTrue(newFolder.mkdirs()); - } + private void createConfig(File testDir) { + conf = new OzoneConfiguration(); System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); - ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1000, - TimeUnit.MILLISECONDS); + ServerUtils.setOzoneMetaDirPath(conf, testDir.toString()); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, + 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, + 1, TimeUnit.SECONDS); + conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, + 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); - - return conf; } - @AfterEach - public void cleanup() throws Exception { - om.stop(); + private void createSubject() throws Exception { + OmTestManagers omTestManagers = new OmTestManagers(conf, scmBlockTestingClient, null); + keyManager = omTestManagers.getKeyManager(); + keyDeletingService = keyManager.getDeletingService(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + metadataManager = omTestManagers.getMetadataManager(); } /** - * In this test, we create a bunch of keys and delete them. Then we start the - * KeyDeletingService and pass a SCMClient which does not fail. We make sure - * that all the keys that we deleted is picked up and deleted by - * OzoneManager. - * - * @throws IOException - on Failure. + * Tests happy path. 
*/ + @Nested + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class Normal { - @Test - public void checkIfDeleteServiceIsDeletingKeys() - throws IOException, TimeoutException, InterruptedException, - AuthenticationException { - OzoneConfiguration conf = createConfAndInitValues(); - OmTestManagers omTestManagers - = new OmTestManagers(conf); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); + @BeforeAll + void setup(@TempDir File testDir) throws Exception { + // failCallsFrequency = 0 means all calls succeed + scmBlockTestingClient = new ScmBlockLocationTestingClient(null, null, 0); - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 1); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - GenericTestUtils.waitFor( - () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount, - 1000, 10000); - assertThat(keyDeletingService.getRunCount().get()).isGreaterThan(1); - assertEquals(0, keyManager.getPendingDeletionKeys( - Integer.MAX_VALUE).getKeyBlocksList().size()); - } + createConfig(testDir); + createSubject(); + } - @Test - public void checkIfDeleteServiceWithFailingSCM() - throws IOException, TimeoutException, InterruptedException, - AuthenticationException { - OzoneConfiguration conf = createConfAndInitValues(); - ScmBlockLocationProtocol blockClient = - //failCallsFrequency = 1 , means all calls fail. - new ScmBlockLocationTestingClient(null, null, 1); - OmTestManagers omTestManagers - = new OmTestManagers(conf, blockClient, null); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); + @AfterEach + void resume() { + keyDeletingService.resume(); + } - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 1); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - GenericTestUtils.waitFor( - () -> { - try { - int numPendingDeletionKeys = - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) - .getKeyBlocksList().size(); - if (numPendingDeletionKeys != keyCount) { - LOG.info("Expected {} keys to be pending deletion, but got {}", - keyCount, numPendingDeletionKeys); - return false; - } - return true; - } catch (IOException e) { - LOG.error("Error while getting pending deletion keys.", e); - return false; - } - }, 100, 2000); - // Make sure that we have run the background thread 5 times more - GenericTestUtils.waitFor( - () -> keyDeletingService.getRunCount().get() >= 5, - 100, 10000); - // Since SCM calls are failing, deletedKeyCount should be zero. - assertEquals(0, keyDeletingService.getDeletedKeyCount().get()); - assertEquals(keyCount, keyManager - .getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList().size()); - } + @AfterAll + void cleanup() { + if (om.stop()) { + om.join(); + } + } - @Test - public void checkDeletionForEmptyKey() - throws IOException, TimeoutException, InterruptedException, - AuthenticationException { - OzoneConfiguration conf = createConfAndInitValues(); - ScmBlockLocationProtocol blockClient = - //failCallsFrequency = 1 , means all calls fail. 
- new ScmBlockLocationTestingClient(null, null, 1); - OmTestManagers omTestManagers - = new OmTestManagers(conf, blockClient, null); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); + /** + * In this test, we create a bunch of keys and delete them. Then we start the + * KeyDeletingService and pass a SCMClient which does not fail. We make sure + * that all the keys that we deleted is picked up and deleted by + * OzoneManager. + */ + @Test + void checkIfDeleteServiceIsDeletingKeys() + throws IOException, TimeoutException, InterruptedException { + final long initialDeletedCount = getDeletedKeyCount(); + final long initialRunCount = getRunCount(); + + final int keyCount = 100; + createAndDeleteKeys(keyCount, 1); + + GenericTestUtils.waitFor( + () -> getDeletedKeyCount() >= initialDeletedCount + keyCount, + 100, 10000); + assertThat(getRunCount()).isGreaterThan(initialRunCount); + assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + .isEmpty(); + } - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 0); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - - // the pre-allocated blocks are not committed, hence they will be deleted. - GenericTestUtils.waitFor( - () -> { - try { - int numPendingDeletionKeys = - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) - .getKeyBlocksList().size(); - if (numPendingDeletionKeys != keyCount) { - LOG.info("Expected {} keys to be pending deletion, but got {}", - keyCount, numPendingDeletionKeys); - return false; - } - return true; - } catch (IOException e) { - LOG.error("Error while getting pending deletion keys.", e); - return false; - } - }, 100, 2000); - - // Make sure that we have run the background thread 2 times or more - GenericTestUtils.waitFor( - () -> keyDeletingService.getRunCount().get() >= 2, - 100, 1000); - // the blockClient is set to fail the deletion of key blocks, hence no keys - // will be deleted - assertEquals(0, keyDeletingService.getDeletedKeyCount().get()); - } + @Test + void checkDeletionForKeysWithMultipleVersions() throws Exception { + final long initialDeletedCount = getDeletedKeyCount(); + final long initialRunCount = getRunCount(); + final int initialDeletedBlockCount = scmBlockTestingClient.getNumberOfDeletedBlocks(); - @Test - public void checkDeletionForPartiallyCommitKey() - throws IOException, TimeoutException, InterruptedException, - AuthenticationException { - OzoneConfiguration conf = createConfAndInitValues(); - ScmBlockLocationProtocol blockClient = - //failCallsFrequency = 1 , means all calls fail. 
- new ScmBlockLocationTestingClient(null, null, 1); - OmTestManagers omTestManagers - = new OmTestManagers(conf, blockClient, null); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); - - // Create Volume and Bucket - createVolumeAndBucket(keyManager, volumeName, bucketName, false); - - OmKeyArgs keyArg = createAndCommitKey(keyManager, volumeName, bucketName, - keyName, 3, 1); - - // Only the uncommitted block should be pending to be deleted. - GenericTestUtils.waitFor( - () -> { - try { - return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) - .getKeyBlocksList() - .stream() - .map(BlockGroup::getBlockIDList) - .flatMap(Collection::stream) - .collect(Collectors.toList()).size() == 1; - } catch (IOException e) { - e.printStackTrace(); - } - return false; - }, - 500, 3000); - - // Delete the key - writeClient.deleteKey(keyArg); + // Create Volume and Bucket with versioning enabled + createVolumeAndBucket(volumeName, bucketName, true); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - - // All blocks should be pending to be deleted. - GenericTestUtils.waitFor( - () -> { - try { - return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) - .getKeyBlocksList() - .stream() - .map(BlockGroup::getBlockIDList) - .flatMap(Collection::stream) - .collect(Collectors.toList()).size() == 3; - } catch (IOException e) { - e.printStackTrace(); - } - return false; - }, - 500, 3000); - - // the blockClient is set to fail the deletion of key blocks, hence no keys - // will be deleted - assertEquals(0, keyDeletingService.getDeletedKeyCount().get()); - } + // Create 2 versions of the same key + final String keyName = uniqueObjectName("key"); + OmKeyArgs keyArgs = createAndCommitKey(volumeName, bucketName, keyName, 1); + createAndCommitKey(volumeName, bucketName, keyName, 2); - @Test - public void checkDeletionForKeysWithMultipleVersions() - throws IOException, TimeoutException, InterruptedException, - AuthenticationException { - OzoneConfiguration conf = createConfAndInitValues(); - OmTestManagers omTestManagers = new OmTestManagers(conf); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); + // Delete the key + writeClient.deleteKey(keyArgs); + + GenericTestUtils.waitFor( + () -> getDeletedKeyCount() >= initialDeletedCount + 1, + 1000, 10000); + assertThat(getRunCount()) + .isGreaterThan(initialRunCount); + assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + .isEmpty(); + + // The 1st version of the key has 1 block and the 2nd version has 2 + // blocks. 
Hence, the ScmBlockClient should have received at least 3 + // blocks for deletion from the KeyDeletionService + assertThat(scmBlockTestingClient.getNumberOfDeletedBlocks()) + .isGreaterThanOrEqualTo(initialDeletedBlockCount + 3); + } - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - - // Create Volume and Bucket with versioning enabled - createVolumeAndBucket(keyManager, volumeName, bucketName, true); - - // Create 2 versions of the same key - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); - OmKeyArgs keyArgs = createAndCommitKey(keyManager, volumeName, bucketName, - keyName, 1); - createAndCommitKey(keyManager, volumeName, bucketName, keyName, 2); - - // Delete the key - writeClient.deleteKey(keyArgs); - - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - GenericTestUtils.waitFor( - () -> keyDeletingService.getDeletedKeyCount().get() >= 1, - 1000, 10000); - assertThat(keyDeletingService.getRunCount().get()).isGreaterThan(1); - assertEquals(0, keyManager.getPendingDeletionKeys( - Integer.MAX_VALUE).getKeyBlocksList().size()); - - // The 1st version of the key has 1 block and the 2nd version has 2 - // blocks. Hence, the ScmBlockClient should have received atleast 3 - // blocks for deletion from the KeyDeletionService - ScmBlockLocationTestingClient scmBlockTestingClient = - (ScmBlockLocationTestingClient) omTestManagers.getScmBlockClient(); - assertThat(scmBlockTestingClient.getNumberOfDeletedBlocks()).isGreaterThanOrEqualTo(3); - } + @Test + void checkDeletedTableCleanUpForSnapshot() throws Exception { + final String volumeName = getTestName(); + final String bucketName1 = uniqueObjectName("bucket"); + final String bucketName2 = uniqueObjectName("bucket"); + final String keyName = uniqueObjectName("key"); - private void createAndDeleteKeys(KeyManager keyManager, int keyCount, - int numBlocks) throws IOException { - for (int x = 0; x < keyCount; x++) { - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); + final long initialDeletedCount = getDeletedKeyCount(); + final long initialRunCount = getRunCount(); - // Create Volume and Bucket - createVolumeAndBucket(keyManager, volumeName, bucketName, false); + // Create Volume and Buckets + createVolumeAndBucket(volumeName, bucketName1, false); + createVolumeAndBucket(volumeName, bucketName2, false); - // Create the key - OmKeyArgs keyArg = createAndCommitKey(keyManager, volumeName, bucketName, - keyName, numBlocks); + // Create the keys + OmKeyArgs key1 = createAndCommitKey(volumeName, bucketName1, keyName, 3); + OmKeyArgs key2 = createAndCommitKey(volumeName, bucketName2, keyName, 3); + + // Create snapshot + String snapName = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName1, snapName); // Delete the key - writeClient.deleteKey(keyArg); + writeClient.deleteKey(key1); + writeClient.deleteKey(key2); + + // Run KeyDeletingService + GenericTestUtils.waitFor( + () -> getDeletedKeyCount() >= initialDeletedCount + 1, + 1000, 10000); + assertThat(getRunCount()) + .isGreaterThan(initialRunCount); + assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + 
.isEmpty(); + + // deletedTable should have deleted key of the snapshot bucket + assertFalse(metadataManager.getDeletedTable().isEmpty()); + String ozoneKey1 = + metadataManager.getOzoneKey(volumeName, bucketName1, keyName); + String ozoneKey2 = + metadataManager.getOzoneKey(volumeName, bucketName2, keyName); + + // key1 belongs to snapshot, so it should not be deleted when + // KeyDeletingService runs. But key2 can be reclaimed as it doesn't + // belong to any snapshot scope. + List> rangeKVs + = metadataManager.getDeletedTable().getRangeKVs( + null, 100, ozoneKey1); + assertThat(rangeKVs.size()).isGreaterThan(0); + rangeKVs + = metadataManager.getDeletedTable().getRangeKVs( + null, 100, ozoneKey2); + assertEquals(0, rangeKVs.size()); } - } - @Test - public void checkDeletedTableCleanUpForSnapshot() - throws Exception { - OzoneConfiguration conf = createConfAndInitValues(); - OmTestManagers omTestManagers - = new OmTestManagers(conf); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); - OMMetadataManager metadataManager = omTestManagers.getMetadataManager(); - - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName1 = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName2 = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); - - // Create Volume and Buckets - createVolumeAndBucket(keyManager, volumeName, bucketName1, false); - createVolumeAndBucket(keyManager, volumeName, bucketName2, false); - - // Create the keys - OmKeyArgs key1 = createAndCommitKey(keyManager, volumeName, bucketName1, - keyName, 3); - OmKeyArgs key2 = createAndCommitKey(keyManager, volumeName, bucketName2, - keyName, 3); - - // Create snapshot - String snapName = "snap1"; - writeClient.createSnapshot(volumeName, bucketName1, snapName); - - // Delete the key - writeClient.deleteKey(key1); - writeClient.deleteKey(key2); - - // Run KeyDeletingService - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - GenericTestUtils.waitFor( - () -> keyDeletingService.getDeletedKeyCount().get() >= 1, - 1000, 10000); - assertThat(keyDeletingService.getRunCount().get()).isGreaterThan(1); - assertEquals(0, keyManager - .getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList().size()); - - // deletedTable should have deleted key of the snapshot bucket - assertFalse(metadataManager.getDeletedTable().isEmpty()); - String ozoneKey1 = - metadataManager.getOzoneKey(volumeName, bucketName1, keyName); - String ozoneKey2 = - metadataManager.getOzoneKey(volumeName, bucketName2, keyName); - - // key1 belongs to snapshot, so it should not be deleted when - // KeyDeletingService runs. But key2 can be reclaimed as it doesn't - // belong to any snapshot scope. - List> rangeKVs - = metadataManager.getDeletedTable().getRangeKVs( - null, 100, ozoneKey1); - assertThat(rangeKVs.size()).isGreaterThan(0); - rangeKVs - = metadataManager.getDeletedTable().getRangeKVs( - null, 100, ozoneKey2); - assertEquals(0, rangeKVs.size()); - } + /* + * Create Snap1 + * Create 10 keys + * Create Snap2 + * Delete 10 keys + * Create 5 keys + * Delete 5 keys -> but stop KeyDeletingService so + that keys won't be reclaimed. + * Create snap3 + * Now wait for snap3 to be deepCleaned -> Deleted 5 + keys should be deep cleaned. 
+ * Now delete snap2 -> Wait for snap3 to be deep cleaned so deletedTable + of Snap3 should be empty. + */ + @Test + void testSnapshotDeepClean() throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); + + // Suspend KeyDeletingService + keyDeletingService.suspend(); + + final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); + final long initialKeyCount = metadataManager.countRowsInTable(keyTable); + final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); + + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + + // Create Volume and Buckets + createVolumeAndBucket(volumeName, bucketName, false); + + writeClient.createSnapshot(volumeName, bucketName, uniqueObjectName("snap")); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); + + List createdKeys = new ArrayList<>(); + for (int i = 1; i <= 10; i++) { + OmKeyArgs args = createAndCommitKey(volumeName, bucketName, + uniqueObjectName("key"), 3); + createdKeys.add(args); + } + assertTableRowCount(keyTable, initialKeyCount + 10, metadataManager); - /* - * Create Snap1 - * Create 10 keys - * Create Snap2 - * Delete 10 keys - * Create 5 keys - * Delete 5 keys -> but stop KeyDeletingService so - that keys won't be reclaimed. - * Create snap3 -t - * Now wait for snap3 to be deepCleaned -> Deleted 5 - keys should be deep cleaned. - * Now delete snap2 -> Wait for snap3 to be deep cleaned so deletedTable - of Snap3 should be empty. - */ - @Test - public void testSnapshotDeepClean() throws Exception { - OzoneConfiguration conf = createConfAndInitValues(); - OmTestManagers omTestManagers - = new OmTestManagers(conf); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); - OMMetadataManager metadataManager = omTestManagers.getMetadataManager(); - Table snapshotInfoTable = - om.getMetadataManager().getSnapshotInfoTable(); - Table deletedTable = - om.getMetadataManager().getDeletedTable(); - Table keyTable = - om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); - - KeyDeletingService keyDeletingService = keyManager.getDeletingService(); - // Suspend KeyDeletingService - keyDeletingService.suspend(); - - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); - - // Create Volume and Buckets - createVolumeAndBucket(keyManager, volumeName, bucketName, false); - - writeClient.createSnapshot(volumeName, bucketName, "snap1"); - assertTableRowCount(snapshotInfoTable, 1, metadataManager); - - List createdKeys = new ArrayList<>(); - for (int i = 1; i <= 10; i++) { - OmKeyArgs args = createAndCommitKey(keyManager, volumeName, bucketName, - keyName + i, 3); - createdKeys.add(args); - } - assertTableRowCount(keyTable, 10, metadataManager); + String snap2 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap2); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); + + // Create 5 Keys + for (int i = 11; i <= 15; i++) { + OmKeyArgs args = createAndCommitKey(volumeName, bucketName, 
+ uniqueObjectName("key"), 3); + createdKeys.add(args); + } - writeClient.createSnapshot(volumeName, bucketName, "snap2"); - assertTableRowCount(snapshotInfoTable, 2, metadataManager); + // Delete all 15 keys. + for (int i = 0; i < 15; i++) { + writeClient.deleteKey(createdKeys.get(i)); + } - // Create 5 Keys - for (int i = 11; i <= 15; i++) { - OmKeyArgs args = createAndCommitKey(keyManager, volumeName, bucketName, - keyName + i, 3); - createdKeys.add(args); - } + assertTableRowCount(deletedTable, initialDeletedCount + 15, metadataManager); - // Delete all 15 keys. - for (int i = 0; i < 15; i++) { - writeClient.deleteKey(createdKeys.get(i)); - } + // Create Snap3, traps all the deleted keys. + String snap3 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap3); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 3, metadataManager); + checkSnapDeepCleanStatus(snapshotInfoTable, volumeName, false); - assertTableRowCount(deletedTable, 15, metadataManager); + keyDeletingService.resume(); - // Create Snap3, traps all the deleted keys. - writeClient.createSnapshot(volumeName, bucketName, "snap3"); - assertTableRowCount(snapshotInfoTable, 3, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, false); + try (ReferenceCounted rcOmSnapshot = + om.getOmSnapshotManager().checkForSnapshot( + volumeName, bucketName, getSnapshotPrefix(snap3), true)) { + OmSnapshot snapshot3 = (OmSnapshot) rcOmSnapshot.get(); - keyDeletingService.resume(); + Table snap3deletedTable = + snapshot3.getMetadataManager().getDeletedTable(); - try (ReferenceCounted rcOmSnapshot = - om.getOmSnapshotManager().checkForSnapshot( - volumeName, bucketName, getSnapshotPrefix("snap3"), true)) { - OmSnapshot snap3 = (OmSnapshot) rcOmSnapshot.get(); + // 5 keys can be deep cleaned as it was stuck previously + assertTableRowCount(snap3deletedTable, initialDeletedCount + 10, metadataManager); - Table snap3deletedTable = - snap3.getMetadataManager().getDeletedTable(); + writeClient.deleteSnapshot(volumeName, bucketName, snap2); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); - // 5 keys can be deep cleaned as it was stuck previously - assertTableRowCount(snap3deletedTable, 10, metadataManager); + assertTableRowCount(snap3deletedTable, initialDeletedCount, metadataManager); + assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); + checkSnapDeepCleanStatus(snapshotInfoTable, volumeName, true); + } + } - writeClient.deleteSnapshot(volumeName, bucketName, "snap2"); - assertTableRowCount(snapshotInfoTable, 2, metadataManager); + @Test + void testSnapshotExclusiveSize() throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table renamedTable = + om.getMetadataManager().getSnapshotRenamedTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); + + // Supspend KDS + keyDeletingService.suspend(); + + final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); + final long initialKeyCount = metadataManager.countRowsInTable(keyTable); + final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); + final long initialRenamedCount = metadataManager.countRowsInTable(renamedTable); + + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + final String keyName = uniqueObjectName("key"); + + // Create Volume and 
Buckets + createVolumeAndBucket(volumeName, bucketName, false); + + // Create 3 keys + for (int i = 1; i <= 3; i++) { + createAndCommitKey(volumeName, bucketName, keyName + i, 3); + } + assertTableRowCount(keyTable, initialKeyCount + 3, metadataManager); + + // Create Snapshot1 + String snap1 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap1); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); + assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); + + // Create 2 keys + for (int i = 4; i <= 5; i++) { + createAndCommitKey(volumeName, bucketName, keyName + i, 3); + } + // Delete a key, rename 2 keys. We will be using this to test + // how we handle renamed key for exclusive size calculation. + renameKey(volumeName, bucketName, keyName + 1, "renamedKey1"); + renameKey(volumeName, bucketName, keyName + 2, "renamedKey2"); + deleteKey(volumeName, bucketName, keyName + 3); + assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); + assertTableRowCount(renamedTable, initialRenamedCount + 2, metadataManager); + + // Create Snapshot2 + String snap2 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap2); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); + assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); + + // Create 2 keys + for (int i = 6; i <= 7; i++) { + createAndCommitKey(volumeName, bucketName, keyName + i, 3); + } + + deleteKey(volumeName, bucketName, "renamedKey1"); + deleteKey(volumeName, bucketName, keyName + 4); + // Do a second rename of already renamedKey2 + renameKey(volumeName, bucketName, "renamedKey2", "renamedKey22"); + assertTableRowCount(deletedTable, initialDeletedCount + 2, metadataManager); + assertTableRowCount(renamedTable, initialRenamedCount + 1, metadataManager); + + // Create Snapshot3 + String snap3 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap3); + // Delete 4 keys + deleteKey(volumeName, bucketName, "renamedKey22"); + for (int i = 5; i <= 7; i++) { + deleteKey(volumeName, bucketName, keyName + i); + } - assertTableRowCount(snap3deletedTable, 0, metadataManager); - assertTableRowCount(deletedTable, 0, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, true); + // Create Snapshot4 + String snap4 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap4); + createAndCommitKey(volumeName, bucketName, uniqueObjectName("key"), 3); + + long prevKdsRunCount = getRunCount(); + keyDeletingService.resume(); + + Map expectedSize = new ImmutableMap.Builder() + .put(snap1, 1000L) + .put(snap2, 1000L) + .put(snap3, 2000L) + .put(snap4, 0L) + .build(); + + // Let KeyDeletingService to run for some iterations + GenericTestUtils.waitFor( + () -> (getRunCount() > prevKdsRunCount + 5), + 100, 10000); + + // Check if the exclusive size is set. 
+ try (TableIterator> + iterator = snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue snapshotEntry = iterator.next(); + String snapshotName = snapshotEntry.getValue().getName(); + Long expected = expectedSize.getOrDefault(snapshotName, 0L); + assertNotNull(expected); + assertEquals(expected, snapshotEntry.getValue().getExclusiveSize()); + // Since for the test we are using RATIS/THREE + assertEquals(expected * 3, snapshotEntry.getValue().getExclusiveReplicatedSize()); + } + } } } - @Test - public void testSnapshotExclusiveSize() throws Exception { - OzoneConfiguration conf = createConfAndInitValues(); - OmTestManagers omTestManagers - = new OmTestManagers(conf); - KeyManager keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); - OMMetadataManager metadataManager = omTestManagers.getMetadataManager(); - Table snapshotInfoTable = - om.getMetadataManager().getSnapshotInfoTable(); - Table deletedTable = - om.getMetadataManager().getDeletedTable(); - Table renamedTable = - om.getMetadataManager().getSnapshotRenamedTable(); - Table keyTable = - om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); - - KeyDeletingService keyDeletingService = keyManager.getDeletingService(); - // Supspend KDS - keyDeletingService.suspend(); - - String volumeName = "volume1"; - String bucketName = "bucket1"; - String keyName = "key"; - - // Create Volume and Buckets - createVolumeAndBucket(keyManager, volumeName, bucketName, false); - - // Create 3 keys - for (int i = 1; i <= 3; i++) { - createAndCommitKey(keyManager, volumeName, bucketName, keyName + i, 3); + /** + * Tests failure scenarios. + */ + @Nested + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class Failing { + + @BeforeAll + void setup(@TempDir File testDir) throws Exception { + // failCallsFrequency = 1 means all calls fail + scmBlockTestingClient = new ScmBlockLocationTestingClient(null, null, 1); + createConfig(testDir); + createSubject(); } - assertTableRowCount(keyTable, 3, metadataManager); - // Create Snapshot1 - writeClient.createSnapshot(volumeName, bucketName, "snap1"); - assertTableRowCount(snapshotInfoTable, 1, metadataManager); - assertTableRowCount(deletedTable, 0, metadataManager); + @AfterEach + void resume() { + keyDeletingService.resume(); + } - // Create 2 keys - for (int i = 4; i <= 5; i++) { - createAndCommitKey(keyManager, volumeName, bucketName, keyName + i, 3); + @AfterAll + void cleanup() { + if (om.stop()) { + om.join(); + } } - // Delete a key, rename 2 keys. We will be using this to test - // how we handle renamed key for exclusive size calculation. 
- renameKey(volumeName, bucketName, keyName + 1, "renamedKey1"); - renameKey(volumeName, bucketName, keyName + 2, "renamedKey2"); - deleteKey(volumeName, bucketName, keyName + 3); - assertTableRowCount(deletedTable, 1, metadataManager); - assertTableRowCount(renamedTable, 2, metadataManager); - - // Create Snapshot2 - writeClient.createSnapshot(volumeName, bucketName, "snap2"); - assertTableRowCount(snapshotInfoTable, 2, metadataManager); - assertTableRowCount(deletedTable, 0, metadataManager); - - // Create 2 keys - for (int i = 6; i <= 7; i++) { - createAndCommitKey(keyManager, volumeName, bucketName, keyName + i, 3); + + @Test + void checkIfDeleteServiceWithFailingSCM() throws Exception { + final int initialCount = countKeysPendingDeletion(); + final long initialRunCount = getRunCount(); + final int keyCount = 100; + + createAndDeleteKeys(keyCount, 1); + + GenericTestUtils.waitFor( + () -> countKeysPendingDeletion() == initialCount + keyCount, + 100, 2000); + // Make sure that we have run the background thread 5 times more + GenericTestUtils.waitFor( + () -> getRunCount() >= initialRunCount + 5, + 100, 10000); + // Since SCM calls are failing, deletedKeyCount should be zero. + assertEquals(0, getDeletedKeyCount()); + assertEquals(initialCount + keyCount, countKeysPendingDeletion()); } - deleteKey(volumeName, bucketName, "renamedKey1"); - deleteKey(volumeName, bucketName, "key4"); - // Do a second rename of already renamedKey2 - renameKey(volumeName, bucketName, "renamedKey2", "renamedKey22"); - assertTableRowCount(deletedTable, 2, metadataManager); - assertTableRowCount(renamedTable, 1, metadataManager); - - // Create Snapshot3 - writeClient.createSnapshot(volumeName, bucketName, "snap3"); - // Delete 4 keys - deleteKey(volumeName, bucketName, "renamedKey22"); - for (int i = 5; i <= 7; i++) { - deleteKey(volumeName, bucketName, keyName + i); + @Test + void checkDeletionForEmptyKey() throws Exception { + final int initialCount = countKeysPendingDeletion(); + final long initialRunCount = getRunCount(); + final int keyCount = 100; + + createAndDeleteKeys(keyCount, 0); + + // the pre-allocated blocks are not committed, hence they will be deleted. + GenericTestUtils.waitFor( + () -> countKeysPendingDeletion() == initialCount + keyCount, + 100, 2000); + // Make sure that we have run the background thread 2 times or more + GenericTestUtils.waitFor( + () -> getRunCount() >= initialRunCount + 2, + 100, 1000); + // the blockClient is set to fail the deletion of key blocks, hence no keys + // will be deleted + assertEquals(0, getDeletedKeyCount()); } - // Create Snapshot4 - writeClient.createSnapshot(volumeName, bucketName, "snap4"); - createAndCommitKey(keyManager, volumeName, bucketName, "key8", 3); - keyDeletingService.resume(); + @Test + void checkDeletionForPartiallyCommitKey() throws Exception { + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + final String keyName = uniqueObjectName("key"); + final long initialCount = countBlocksPendingDeletion(); + createVolumeAndBucket(volumeName, bucketName, false); - Map expectedSize = new HashMap() {{ - put("snap1", 1000L); - put("snap2", 1000L); - put("snap3", 2000L); - put("snap4", 0L); - }}; + OmKeyArgs keyArg = createAndCommitKey(volumeName, bucketName, keyName, 3, 1); - long prevKdsRunCount = keyDeletingService.getRunCount().get(); + // Only the uncommitted block should be pending to be deleted. 
+ GenericTestUtils.waitFor( + () -> countBlocksPendingDeletion() == initialCount + 1, + 500, 3000); - // Let KeyDeletingService to run for some iterations - GenericTestUtils.waitFor( - () -> (keyDeletingService.getRunCount().get() > prevKdsRunCount + 5), - 100, 10000); + writeClient.deleteKey(keyArg); - // Check if the exclusive size is set. - try (TableIterator> - iterator = snapshotInfoTable.iterator()) { - while (iterator.hasNext()) { - Table.KeyValue snapshotEntry = iterator.next(); - System.out.println(snapshotEntry.getValue()); - String snapshotName = snapshotEntry.getValue().getName(); - assertEquals(expectedSize.get(snapshotName), - snapshotEntry.getValue(). - getExclusiveSize()); - // Since for the test we are using RATIS/THREE - assertEquals(expectedSize.get(snapshotName) * 3, - snapshotEntry.getValue().getExclusiveReplicatedSize()); - } + // All blocks should be pending to be deleted. + GenericTestUtils.waitFor( + () -> countBlocksPendingDeletion() == initialCount + 3, + 500, 3000); + + // the blockClient is set to fail the deletion of key blocks, hence no keys + // will be deleted + assertEquals(0, getDeletedKeyCount()); } } - private void checkSnapDeepCleanStatus(Table - snapshotInfoTable, boolean deepClean) throws IOException { + private void createAndDeleteKeys(int keyCount, int numBlocks) throws IOException { + for (int x = 0; x < keyCount; x++) { + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + final String keyName = uniqueObjectName("key"); - try (TableIterator> - iterator = snapshotInfoTable.iterator()) { + // Create Volume and Bucket + createVolumeAndBucket(volumeName, bucketName, false); + + // Create the key + OmKeyArgs keyArg = createAndCommitKey(volumeName, bucketName, + keyName, numBlocks); + + // Delete the key + writeClient.deleteKey(keyArg); + } + } + + private static void checkSnapDeepCleanStatus(Table table, String volumeName, boolean deepClean) + throws IOException { + try (TableIterator> iterator = table.iterator()) { while (iterator.hasNext()) { SnapshotInfo snapInfo = iterator.next().getValue(); - assertEquals(snapInfo.getDeepClean(), deepClean); + if (volumeName.equals(snapInfo.getVolumeName())) { + assertThat(snapInfo.getDeepClean()) + .as(snapInfo.toAuditMap().toString()) + .isEqualTo(deepClean); + } } } } - private void assertTableRowCount(Table table, - int count, OMMetadataManager metadataManager) + private static void assertTableRowCount(Table table, + long count, OMMetadataManager metadataManager) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> assertTableRowCount(count, table, metadataManager), 1000, 120000); // 2 minutes } - private boolean assertTableRowCount(int expectedCount, + private static boolean assertTableRowCount(long expectedCount, Table table, OMMetadataManager metadataManager) { long count = 0L; @@ -714,7 +641,7 @@ private boolean assertTableRowCount(int expectedCount, return count == expectedCount; } - private void createVolumeAndBucket(KeyManager keyManager, String volumeName, + private void createVolumeAndBucket(String volumeName, String bucketName, boolean isVersioningEnabled) throws IOException { // cheat here, just create a volume and bucket entry so that we can // create the keys, we put the same data for key and value since the @@ -764,13 +691,13 @@ private void renameKey(String volumeName, writeClient.renameKey(keyArg, toKeyName); } - private OmKeyArgs createAndCommitKey(KeyManager keyManager, String volumeName, + private OmKeyArgs 
createAndCommitKey(String volumeName, String bucketName, String keyName, int numBlocks) throws IOException { - return createAndCommitKey(keyManager, volumeName, bucketName, keyName, + return createAndCommitKey(volumeName, bucketName, keyName, numBlocks, 0); } - private OmKeyArgs createAndCommitKey(KeyManager keyManager, String volumeName, + private OmKeyArgs createAndCommitKey(String volumeName, String bucketName, String keyName, int numBlocks, int numUncommitted) throws IOException { // Even if no key size is appointed, there will be at least one @@ -819,4 +746,44 @@ private OmKeyArgs createAndCommitKey(KeyManager keyManager, String volumeName, writeClient.commitKey(keyArg, session.getId()); return keyArg; } + + private long getDeletedKeyCount() { + final long count = keyDeletingService.getDeletedKeyCount().get(); + LOG.debug("KeyDeletingService deleted keys: {}", count); + return count; + } + + private long getRunCount() { + final long count = keyDeletingService.getRunCount().get(); + LOG.debug("KeyDeletingService run count: {}", count); + return count; + } + + private int countKeysPendingDeletion() { + try { + final int count = keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) + .getKeyBlocksList().size(); + LOG.debug("KeyManager keys pending deletion: {}", count); + return count; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private long countBlocksPendingDeletion() { + try { + return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) + .getKeyBlocksList() + .stream() + .map(BlockGroup::getBlockIDList) + .mapToLong(Collection::size) + .sum(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static String uniqueObjectName(String prefix) { + return prefix + OBJECT_COUNTER.getAndIncrement(); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 272b7b72db1..762d8740565 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -36,23 +36,23 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.ozone.test.GenericTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -60,31 +60,27 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_CLEANUP_SERVICE_INTERVAL; import 
static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_PARTS_CLEANUP_LIMIT_PER_TASK; +import static org.apache.ozone.test.GenericTestUtils.waitFor; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test Multipart Upload Cleanup Service. */ -public class TestMultipartUploadCleanupService { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@Timeout(300) +class TestMultipartUploadCleanupService { private OzoneManagerProtocol writeClient; private OzoneManager om; - private static final Logger LOG = - LoggerFactory.getLogger(TestMultipartUploadCleanupService.class); - private static final Duration SERVICE_INTERVAL = Duration.ofMillis(500); - private static final Duration EXPIRE_THRESHOLD = Duration.ofMillis(1000); + private static final Duration SERVICE_INTERVAL = Duration.ofMillis(100); + private static final Duration EXPIRE_THRESHOLD = Duration.ofMillis(200); private KeyManager keyManager; private OMMetadataManager omMetadataManager; @BeforeAll - public static void setup() { + void setup(@TempDir Path tempDir) throws Exception { ExitUtils.disableSystemExit(); - } - @BeforeEach - public void createConfAndInitValues(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); ServerUtils.setOzoneMetaDirPath(conf, tempDir.toString()); @@ -101,8 +97,8 @@ public void createConfAndInitValues(@TempDir Path tempDir) throws Exception { om = omTestManagers.getOzoneManager(); } - @AfterEach - public void cleanup() throws Exception { + @AfterAll + void cleanup() { om.stop(); } @@ -110,7 +106,6 @@ public void cleanup() throws Exception { * Create a bunch incomplete/inflight multipart upload info. Then we start * the MultipartUploadCleanupService. We make sure that all the multipart * upload info is picked up and aborted by OzoneManager. 
- * @throws Exception */ @ParameterizedTest @CsvSource({ @@ -118,9 +113,7 @@ public void cleanup() throws Exception { "0, 88", "66, 77" }) - @Timeout(300) - public void checkIfCleanupServiceIsDeletingExpiredMultipartUpload( - int numDEFKeys, int numFSOKeys) throws Exception { + void deletesExpiredUpload(int numDEFKeys, int numFSOKeys) throws Exception { MultipartUploadCleanupService multipartUploadCleanupService = (MultipartUploadCleanupService) @@ -140,24 +133,27 @@ public void checkIfCleanupServiceIsDeletingExpiredMultipartUpload( // wait for MPU info to expire Thread.sleep(EXPIRE_THRESHOLD.toMillis()); - assertFalse(keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD, - 10000).isEmpty()); + assertThat(getExpiredMultipartUploads()).isNotEmpty(); multipartUploadCleanupService.resume(); - GenericTestUtils.waitFor(() -> multipartUploadCleanupService - .getRunCount() > oldRunCount, - (int) SERVICE_INTERVAL.toMillis(), - 5 * (int) SERVICE_INTERVAL.toMillis()); - // wait for requests to complete - Thread.sleep(10 * SERVICE_INTERVAL.toMillis()); + waitFor(() -> getExpiredMultipartUploads().isEmpty(), + (int) SERVICE_INTERVAL.toMillis(), + 15 * (int) SERVICE_INTERVAL.toMillis()); + assertThat(multipartUploadCleanupService.getRunCount()) + .isGreaterThan(oldRunCount); assertThat(multipartUploadCleanupService.getSubmittedMpuInfoCount()) .isGreaterThanOrEqualTo(oldMpuInfoCount + numDEFKeys + numFSOKeys); - assertTrue(keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD, - 10000).isEmpty()); + } + private List getExpiredMultipartUploads() { + try { + return keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD, 10000); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } private void createIncompleteMPUKeys(int mpuKeyCount, @@ -202,10 +198,6 @@ private void createVolumeAndBucket(String volumeName, String bucketName, /** * Create inflight multipart upload that are not completed / aborted yet. 
- * @param volumeName - * @param bucketName - * @param keyName - * @throws IOException */ private void createIncompleteMPUKey(String volumeName, String bucketName, String keyName, int numParts) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index bbd379b561d..30fe6f6ffb0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -51,10 +51,13 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; +import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; @@ -81,6 +84,8 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.when; +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(OrderAnnotation.class) class TestOpenKeyCleanupService { private OzoneManagerProtocol writeClient; private OzoneManager om; @@ -97,12 +102,9 @@ class TestOpenKeyCleanupService { private OMMetadataManager omMetadataManager; @BeforeAll - public static void setup() { + void setup(@TempDir Path tempDir) throws Exception { ExitUtils.disableSystemExit(); - } - @BeforeEach - public void createConfAndInitValues(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); ServerUtils.setOzoneMetaDirPath(conf, tempDir.toString()); @@ -122,8 +124,8 @@ public void createConfAndInitValues(@TempDir Path tempDir) throws Exception { om = omTestManagers.getOzoneManager(); } - @AfterEach - public void cleanup() throws Exception { + @AfterAll + void cleanup() { if (om.stop()) { om.join(); } @@ -161,12 +163,11 @@ public void testCleanupExpiredOpenKeys( final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); final long oldrunCount = openKeyCleanupService.getRunCount(); LOG.info("oldkeyCount={}, oldrunCount={}", oldkeyCount, oldrunCount); - assertEquals(0, oldkeyCount); final OMMetrics metrics = om.getMetrics(); - assertEquals(0, metrics.getNumKeyHSyncs()); - assertEquals(0, metrics.getNumOpenKeysCleaned()); - assertEquals(0, metrics.getNumOpenKeysHSyncCleaned()); + long numKeyHSyncs = metrics.getNumKeyHSyncs(); + long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); + long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned(); final int keyCount = numDEFKeys + numFSOKeys; createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT, false); createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED, false); @@ -181,7 +182,7 @@ public void testCleanupExpiredOpenKeys( openKeyCleanupService.resume(); GenericTestUtils.waitFor( - () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= keyCount, + () -> openKeyCleanupService.getSubmittedOpenKeyCount() 
>= oldkeyCount + keyCount, SERVICE_INTERVAL, WAIT_TIME); GenericTestUtils.waitFor( () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, @@ -191,13 +192,13 @@ public void testCleanupExpiredOpenKeys( waitForOpenKeyCleanup(hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED); if (hsync) { - assertAtLeast(numDEFKeys, metrics.getNumOpenKeysCleaned()); - assertAtLeast(numFSOKeys, metrics.getNumOpenKeysHSyncCleaned()); - assertEquals(numFSOKeys, metrics.getNumKeyHSyncs()); + assertAtLeast(numOpenKeysCleaned + numDEFKeys, metrics.getNumOpenKeysCleaned()); + assertAtLeast(numOpenKeysHSyncCleaned + numFSOKeys, metrics.getNumOpenKeysHSyncCleaned()); + assertEquals(numKeyHSyncs + numFSOKeys, metrics.getNumKeyHSyncs()); } else { - assertAtLeast(keyCount, metrics.getNumOpenKeysCleaned()); - assertEquals(0, metrics.getNumOpenKeysHSyncCleaned()); - assertEquals(0, metrics.getNumKeyHSyncs()); + assertAtLeast(numOpenKeysCleaned + keyCount, metrics.getNumOpenKeysCleaned()); + assertEquals(numOpenKeysHSyncCleaned, metrics.getNumOpenKeysHSyncCleaned()); + assertEquals(numKeyHSyncs, metrics.getNumKeyHSyncs()); } } @@ -209,6 +210,8 @@ public void testCleanupExpiredOpenKeys( */ @Test @Timeout(300) + // Run this test first to avoid any lingering keys generated by other tests. + @Order(1) public void testIgnoreExpiredRecoverhsyncKeys() throws Exception { OpenKeyCleanupService openKeyCleanupService = (OpenKeyCleanupService) keyManager.getOpenKeyCleanupService(); @@ -282,12 +285,11 @@ public void testExcludeMPUOpenKeys( final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); final long oldrunCount = openKeyCleanupService.getRunCount(); LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); - assertEquals(0, oldkeyCount); final OMMetrics metrics = om.getMetrics(); - assertEquals(0, metrics.getNumKeyHSyncs()); - assertEquals(0, metrics.getNumOpenKeysCleaned()); - assertEquals(0, metrics.getNumOpenKeysHSyncCleaned()); + long numKeyHSyncs = metrics.getNumKeyHSyncs(); + long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); + long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned(); createIncompleteMPUKeys(numDEFKeys, BucketLayout.DEFAULT, NUM_MPU_PARTS, true); createIncompleteMPUKeys(numFSOKeys, BucketLayout.FILE_SYSTEM_OPTIMIZED, @@ -316,7 +318,9 @@ public void testExcludeMPUOpenKeys( assertExpiredOpenKeys(true, false, BucketLayout.FILE_SYSTEM_OPTIMIZED); - assertEquals(0, metrics.getNumOpenKeysCleaned()); + assertEquals(numKeyHSyncs, metrics.getNumKeyHSyncs()); + assertEquals(numOpenKeysCleaned, metrics.getNumOpenKeysCleaned()); + assertEquals(numOpenKeysHSyncCleaned, metrics.getNumOpenKeysHSyncCleaned()); } /** @@ -346,12 +350,9 @@ public void testCleanupExpiredOpenMPUPartKeys( final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); final long oldrunCount = openKeyCleanupService.getRunCount(); LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); - assertEquals(0, oldkeyCount); final OMMetrics metrics = om.getMetrics(); - assertEquals(0, metrics.getNumKeyHSyncs()); - assertEquals(0, metrics.getNumOpenKeysCleaned()); - assertEquals(0, metrics.getNumOpenKeysHSyncCleaned()); + long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); final int keyCount = numDEFKeys + numFSOKeys; final int partCount = NUM_MPU_PARTS * keyCount; createIncompleteMPUKeys(numDEFKeys, BucketLayout.DEFAULT, NUM_MPU_PARTS, @@ -371,7 +372,7 @@ public void testCleanupExpiredOpenMPUPartKeys( openKeyCleanupService.resume(); GenericTestUtils.waitFor( - () 
-> openKeyCleanupService.getSubmittedOpenKeyCount() >= partCount, + () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + partCount, SERVICE_INTERVAL, WAIT_TIME); GenericTestUtils.waitFor( () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, @@ -380,7 +381,7 @@ public void testCleanupExpiredOpenMPUPartKeys( // No expired MPU parts fetched waitForOpenKeyCleanup(false, BucketLayout.DEFAULT); waitForOpenKeyCleanup(false, BucketLayout.FILE_SYSTEM_OPTIMIZED); - assertAtLeast(partCount, metrics.getNumOpenKeysCleaned()); + assertAtLeast(numOpenKeysCleaned + partCount, metrics.getNumOpenKeysCleaned()); } private static void assertAtLeast(long expectedMinimum, long actual) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java index 8c9507b20ff..dfbc6a3b944 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java @@ -30,12 +30,12 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.rocksdb.ColumnFamilyDescriptor; @@ -44,6 +44,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; import java.util.ArrayList; @@ -72,7 +73,8 @@ * Tests SnapshotDiffCleanupService. 
*/ public class TestSnapshotDiffCleanupService { - private static File file; + @TempDir + private static Path tempDir; private static ManagedRocksDB db; private static ManagedDBOptions dbOptions; private static ManagedColumnFamilyOptions columnFamilyOptions; @@ -102,7 +104,7 @@ public static void staticInit() throws RocksDBException { dbOptions.setCreateIfMissing(true); columnFamilyOptions = new ManagedColumnFamilyOptions(); - file = new File("./test-snap-diff-clean-up"); + File file = tempDir.resolve("./test-snap-diff-clean-up").toFile(); if (!file.mkdirs() && !file.exists()) { throw new IllegalArgumentException("Unable to create directory " + file); @@ -133,8 +135,6 @@ public static void staticTearDown() { if (db != null) { db.close(); } - - GenericTestUtils.deleteDirectory(file); } @BeforeEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java index b077f4841d8..5669d9e9961 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java @@ -38,7 +38,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java index da182730bc8..190db469c19 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java @@ -20,7 +20,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; @@ -82,7 +81,6 @@ public void testLinkFiles(@TempDir File tempDir) throws Exception { map(Path::toString).collect(Collectors.toSet()); assertEquals(tree1Files, tree2Files); - GenericTestUtils.deleteDirectory(tempDir); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java index 990bdc48493..10df90b434f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java @@ -18,6 +18,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -29,10 +30,10 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import 
org.apache.hadoop.util.ClosableIterator; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -44,7 +45,8 @@ * Test persistent list backed by RocksDB. */ public class TestRocksDbPersistentList { - private static File file; + @TempDir + private static Path tempDir; private static ManagedRocksDB db; private static ManagedDBOptions dbOptions; private static ManagedColumnFamilyOptions columnFamilyOptions; @@ -55,7 +57,7 @@ public static void staticInit() throws RocksDBException { dbOptions.setCreateIfMissing(true); columnFamilyOptions = new ManagedColumnFamilyOptions(); - file = new File("./test-persistent-list"); + File file = tempDir.resolve("./test-persistent-list").toFile(); if (!file.mkdirs() && !file.exists()) { throw new IllegalArgumentException("Unable to create directory " + file); @@ -86,8 +88,6 @@ public static void staticTearDown() { if (db != null) { db.close(); } - - GenericTestUtils.deleteDirectory(file); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentMap.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentMap.java index 46946e09a2f..7fc29b0e4aa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentMap.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentMap.java @@ -18,6 +18,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -38,10 +39,10 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.util.ClosableIterator; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -56,7 +57,8 @@ * Test persistent map backed by RocksDB. 
*/ public class TestRocksDbPersistentMap { - private static File file; + @TempDir + private static Path tempDir; private static ManagedRocksDB db; private static ManagedDBOptions dbOptions; private static ManagedColumnFamilyOptions columnFamilyOptions; @@ -69,7 +71,7 @@ public static void staticInit() throws RocksDBException { dbOptions.setCreateIfMissing(true); columnFamilyOptions = new ManagedColumnFamilyOptions(); - file = new File("./test-persistent-map"); + File file = tempDir.resolve("./test-persistent-map").toFile(); if (!file.mkdirs() && !file.exists()) { throw new IllegalArgumentException("Unable to create directory " + file); @@ -101,8 +103,6 @@ public static void teardown() throws RocksDBException { if (db != null) { db.close(); } - - GenericTestUtils.deleteDirectory(file); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentSet.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentSet.java index 6f2248f75c4..7bf7071353e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentSet.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentSet.java @@ -18,6 +18,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -32,10 +33,10 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.util.ClosableIterator; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -48,7 +49,8 @@ * Test persistent set backed by RocksDB. 
*/ public class TestRocksDbPersistentSet { - private static File file; + @TempDir + private static Path tempDir; private static ManagedRocksDB db; private static ManagedDBOptions dbOptions; private static ManagedColumnFamilyOptions columnFamilyOptions; @@ -59,7 +61,7 @@ public static void staticInit() throws RocksDBException { dbOptions.setCreateIfMissing(true); columnFamilyOptions = new ManagedColumnFamilyOptions(); - file = new File("./test-persistent-set"); + File file = tempDir.resolve("./test-persistent-set").toFile(); if (!file.mkdirs() && !file.exists()) { throw new IllegalArgumentException("Unable to create directory " + file); @@ -91,8 +93,6 @@ public static void staticTearDown() { if (db != null) { db.close(); } - - GenericTestUtils.deleteDirectory(file); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index 37a8c26e7b6..cecd7a99af2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -40,8 +40,8 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 57a27468b0c..b92546c2899 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -71,7 +71,7 @@ import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -162,19 +162,19 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.anySet; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import 
static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.mockStatic; @@ -384,9 +384,9 @@ public void init() throws RocksDBException, IOException, ExecutionException { CacheLoader loader = new CacheLoader() { - @NotNull + @Nonnull @Override - public OmSnapshot load(@NotNull String key) { + public OmSnapshot load(@Nonnull String key) { return getMockedOmSnapshot(key); } }; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java index 2359127dbc9..0fc55b2f2a9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.upgrade; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java index 90f0b558d16..08905afee08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java index 201b2e5c27b..033ec393422 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java @@ -45,10 +45,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -195,17 +193,9 @@ public void testFinalizationWithFailingUpgradeAction() throws Exception { setupVersionManagerMockToFinalize(lfs); OMUpgradeFinalizer finalizer = new OMUpgradeFinalizer(versionManager); - try { - 
finalizer.finalize(CLIENT_ID, om); - fail(); - } catch (Exception e) { - assertInstanceOf(UpgradeException.class, e); - assertThat(e.getMessage()).contains(lfs.iterator().next().name()); - assertEquals( - ((UpgradeException) e).getResult(), - LAYOUT_FEATURE_FINALIZATION_FAILED - ); - } + UpgradeException e = assertThrows(UpgradeException.class, () -> finalizer.finalize(CLIENT_ID, om)); + assertThat(e.getMessage()).contains(lfs.iterator().next().name()); + assertEquals(e.getResult(), LAYOUT_FEATURE_FINALIZATION_FAILED); if (finalizer.isFinalizationDone()) { when(versionManager.getUpgradeState()).thenReturn(FINALIZATION_DONE); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java index 49b3e27a900..45647c3dffd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java @@ -25,9 +25,9 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -65,13 +65,8 @@ public void testOMLayoutVersionManager() throws IOException { public void testOMLayoutVersionManagerInitError() { int lV = OMLayoutFeature.values()[OMLayoutFeature.values().length - 1] .layoutVersion() + 1; - - try { - new OMLayoutVersionManager(lV); - fail(); - } catch (OMException ex) { - assertEquals(NOT_SUPPORTED_OPERATION, ex.getResult()); - } + OMException ome = assertThrows(OMException.class, () -> new OMLayoutVersionManager(lV)); + assertEquals(NOT_SUPPORTED_OPERATION, ome.getResult()); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java index 7aa0a0fccaf..b8ecc4fd7e4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java @@ -29,10 +29,10 @@ import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; import org.bouncycastle.cert.X509CertificateHolder; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -66,7 +66,6 @@ public class TestOmCertificateClientInit { private String certSerialId = "3284792342234"; private OMCertificateClient omCertificateClient; private HDDSKeyGenerator keyGenerator; - private Path metaDirPath; private SecurityConfig securityConfig; private 
KeyCodec omKeyCodec; private X509Certificate x509Certificate; @@ -86,11 +85,8 @@ private static Stream parameters() { } @BeforeEach - public void setUp() throws Exception { + public void setUp(@TempDir Path metaDirPath) throws Exception { OzoneConfiguration config = new OzoneConfiguration(); - final String path = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - metaDirPath = Paths.get(path, "test"); config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString()); securityConfig = new SecurityConfig(config); keyGenerator = new HDDSKeyGenerator(securityConfig); @@ -115,7 +111,6 @@ public void setUp() throws Exception { public void tearDown() throws IOException { omCertificateClient.close(); omCertificateClient = null; - FileUtils.deleteQuietly(metaDirPath.toFile()); } @ParameterizedTest diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java index 49d19d3bc31..ce7c0c848f1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java @@ -106,9 +106,9 @@ public void setUp() throws Exception { serviceRpcAdd = new Text("localhost"); final Map s3Secrets = new HashMap<>(); s3Secrets.put("testuser1", - new S3SecretValue("testuser1", "dbaksbzljandlkandlsd")); + S3SecretValue.of("testuser1", "dbaksbzljandlkandlsd")); s3Secrets.put("abc", - new S3SecretValue("abc", "djakjahkd")); + S3SecretValue.of("abc", "djakjahkd")); om = mock(OzoneManager.class); OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf, om); when(om.getMetadataManager()).thenReturn(metadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index d13aa6b5bf4..c09c51a624d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.security; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import javax.crypto.KeyGenerator; import javax.crypto.Mac; import javax.crypto.SecretKey; @@ -29,6 +28,7 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.InvalidKeyException; import java.security.KeyPair; @@ -45,23 +45,17 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.ssl.TestSSLFactory; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.commons.lang3.RandomStringUtils; import 
org.apache.commons.lang3.RandomUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,11 +66,6 @@ public class TestOzoneTokenIdentifier { private static final Logger LOG = LoggerFactory .getLogger(TestOzoneTokenIdentifier.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneTokenIdentifier.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static File base; private static String sslConfsDir; private static final String EXCLUDE_CIPHERS = "TLS_ECDHE_RSA_WITH_RC4_128_SHA," @@ -87,35 +76,11 @@ public class TestOzoneTokenIdentifier { + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA," + "SSL_RSA_WITH_RC4_128_MD5"; - @BeforeAll - public static void setUp() throws Exception { - base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - } - - private ConfigurationSource createConfiguration(boolean clientCert, - boolean trustStore) - throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf, - clientCert, trustStore, EXCLUDE_CIPHERS); - sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class); - return conf; - } - - @AfterAll - public static void cleanUp() throws Exception { - FileUtil.fullyDelete(base); - KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir); - } @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); + public void testSignToken(@TempDir Path baseDir) throws GeneralSecurityException, IOException { + String keystore = baseDir.resolve("keystore.jks").toFile().getAbsolutePath(); + String truststore = baseDir.resolve("truststore.jks").toFile().getAbsolutePath(); String trustPassword = "trustPass"; String keyStorePassword = "keyStorePass"; String keyPassword = "keyPass"; @@ -282,9 +247,9 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { * Test serialization/deserialization of OzoneTokenIdentifier. 
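These hunks swap the hand-rolled GenericTestUtils temp directories (and their @AfterEach/@AfterAll cleanup) for JUnit 5's @TempDir injection. A small sketch of the mechanism, assuming only the JUnit 5 API; the file name is illustrative:

```java
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketchTest {

  @Test
  void writesIntoManagedTempDir(@TempDir Path baseDir) throws IOException {
    // JUnit creates a fresh directory before the test and deletes it
    // recursively afterwards, so no FileUtils.deleteQuietly / fullyDelete
    // teardown is required.
    Path tokenFile = baseDir.resolve("tokenFile");
    Files.write(tokenFile, new byte[] {1, 2, 3});
    assertTrue(Files.exists(tokenFile));
  }
}
```

The same annotation can also sit on a static field when the directory has to be shared by @BeforeAll setup, which appears to be the approach taken for testDir in the authorizer tests further down.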
*/ @Test - public void testReadWriteInProtobuf() throws IOException { + public void testReadWriteInProtobuf(@TempDir Path baseDir) throws IOException { OzoneTokenIdentifier id = getIdentifierInst(); - File idFile = new File(BASEDIR + "/tokenFile"); + File idFile = baseDir.resolve("tokenFile").toFile(); FileOutputStream fop = new FileOutputStream(idFile); DataOutputStream dataOutputStream = new DataOutputStream(fop); @@ -332,7 +297,7 @@ public void testTokenSerialization() throws IOException { } @Test - public void testTokenPersistence() throws IOException { + void testTokenPersistence() throws IOException { OzoneTokenIdentifier idWrite = getIdentifierInst(); idWrite.setOmServiceId("defaultServiceId"); @@ -340,11 +305,7 @@ public void testTokenPersistence() throws IOException { Codec idCodec = TokenIdentifierCodec.get(); OzoneTokenIdentifier idRead = null; - try { - idRead = idCodec.fromPersistedFormat(oldIdBytes); - } catch (IOException ex) { - fail("Should not fail to load old token format"); - } + idRead = idCodec.fromPersistedFormat(oldIdBytes); assertEquals(idWrite, idRead, "Deserialize Serialized Token should equal."); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneAuthorizerFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneAuthorizerFactory.java index 127170debc8..4a63da44774 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneAuthorizerFactory.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneAuthorizerFactory.java @@ -25,7 +25,7 @@ import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import java.util.stream.Stream; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index e5723f0a2a8..f5bb8d35350 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.security.acl; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -42,10 +41,9 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.junit.jupiter.MockitoExtension; @@ -90,6 +88,7 @@ public class TestOzoneNativeAuthorizer { private static final List ADMIN_USERNAMES = singletonList("om"); + @TempDir private static File testDir; private String vol; private String buck; @@ -144,7 +143,6 @@ public static void setup() throws Exception { OzoneConfiguration ozConfig 
= new OzoneConfiguration(); ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - testDir = GenericTestUtils.getRandomizedTestDir(); ozConfig.set(OZONE_METADATA_DIRS, testDir.toString()); ozConfig.set(OZONE_ADMINISTRATORS, "om"); @@ -164,11 +162,6 @@ public static void setup() throws Exception { new String[]{"test"}); } - @AfterAll - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(testDir); - } - private void createKey(String volume, String bucket, String keyName) throws IOException { OmKeyArgs keyArgs = new OmKeyArgs.Builder() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index e9249b1ab79..f17d477bd79 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.security.acl; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -44,12 +43,10 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -92,14 +89,14 @@ public class TestParentAcl { private static UserGroupInformation adminUgi; private static UserGroupInformation testUgi, testUgi1; private static OzoneManagerProtocol writeClient; + @TempDir private static File testDir; @BeforeAll - public static void setup() throws IOException, AuthenticationException { + static void setup() throws Exception { ozConfig = new OzoneConfiguration(); ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - testDir = GenericTestUtils.getRandomizedTestDir(); ozConfig.set(OZONE_METADATA_DIRS, testDir.toString()); ozConfig.set(OZONE_ADMINISTRATORS, "om"); @@ -122,11 +119,6 @@ public static void setup() throws IOException, AuthenticationException { new String[]{"test1"}); } - @AfterAll - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(testDir); - } - @Test @Unhealthy("HDDS-6335") public void testKeyAcl() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java index ff3609b810c..ce5aa12c568 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java @@ -22,10 +22,12 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Mockito; import 
java.util.stream.Stream; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * Tests for {@link SharedTmpDirAuthorizer}. */ @@ -37,8 +39,8 @@ public class TestSharedTmpDirAuthorizer { @BeforeAll public static void setUp() { - nativeAuthorizer = Mockito.mock(OzoneNativeAuthorizer.class); - authorizer = Mockito.mock(TestOzoneAuthorizerFactory + nativeAuthorizer = mock(OzoneNativeAuthorizer.class); + authorizer = mock(TestOzoneAuthorizerFactory .MockThirdPartyAuthorizer.class); sharedTmpDirAuthorizer = @@ -65,13 +67,13 @@ public void testCheckAccess(String volumeName, .setKeyName("key1") .build(); - RequestContext context = Mockito.mock(RequestContext.class); + RequestContext context = mock(RequestContext.class); sharedTmpDirAuthorizer.checkAccess(objInfo, context); if (isNative) { - Mockito.verify(nativeAuthorizer).checkAccess(objInfo, context); + verify(nativeAuthorizer).checkAccess(objInfo, context); } else { - Mockito.verify(authorizer).checkAccess(objInfo, context); + verify(authorizer).checkAccess(objInfo, context); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index 7a2d22b338b..983086d251b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.security.acl; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -37,11 +36,9 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.io.IOException; @@ -74,14 +71,14 @@ public class TestVolumeOwner { private static OMMetadataManager metadataManager; private static UserGroupInformation testUgi; private static OzoneManagerProtocol writeClient; + @TempDir private static File testDir; @BeforeAll - public static void setup() throws IOException, AuthenticationException { + static void setup() throws Exception { ozoneConfig = new OzoneConfiguration(); ozoneConfig.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - testDir = GenericTestUtils.getRandomizedTestDir(); ozoneConfig.set(OZONE_METADATA_DIRS, testDir.toString()); OmTestManagers omTestManagers = @@ -104,11 +101,6 @@ public static void setup() throws IOException, AuthenticationException { prepareTestKeys(); } - @AfterAll - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(testDir); - } - // create 2 volumes private static void prepareTestVols() throws IOException { for (int i = 0; i < 2; i++) { diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index df52a3d29d0..0543d461ea7 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -28,7 +28,6 @@ UTF-8 
true - false diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java index 1cde5fecb99..a8b963a7ab2 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java @@ -31,7 +31,7 @@ @InterfaceAudience.Private public class OzoneFsShell extends FsShell { - private final String ozoneUsagePrefix = "Usage: ozone fs [generic options]"; + private static final String OZONE_USAGE_PREFIX = "Usage: ozone fs [generic options]"; /** * Default ctor with no configuration. Be sure to invoke @@ -66,7 +66,7 @@ protected void registerCommands(CommandFactory factory) { @Override protected String getUsagePrefix() { - return ozoneUsagePrefix; + return OZONE_USAGE_PREFIX; } /** diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index f6226a44f33..2895840aaf3 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -25,7 +25,6 @@ ozone-recon 7.33.6 - false diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index c5485610738..39d091ee03c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -63,7 +63,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,7 +86,7 @@ public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); } - @NotNull + @Nonnull public static List> initContainerReportQueue( OzoneConfiguration configuration) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 37d6ae42b5c..84f55749a68 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -54,6 +54,7 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; @@ -536,8 +537,8 @@ private void getPendingForDeletionDirInfo( continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(key); - keyEntityInfo.setPath(omKeyInfo.getKeyName()); + keyEntityInfo.setKey(omKeyInfo.getFileName()); + keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); keyEntityInfo.setSize( fetchSizeForDeletedDirectory(omKeyInfo.getObjectID())); @@ -664,6 +665,12 @@ private void updateReplicatedAndUnReplicatedTotal( }); } + private String createPath(OmKeyInfo omKeyInfo) { + return 
omKeyInfo.getVolumeName() + OM_KEY_PREFIX + + omKeyInfo.getBucketName() + OM_KEY_PREFIX + omKeyInfo.getKeyName(); + } + + @VisibleForTesting public GlobalStatsDao getDao() { return this.globalStatsDao; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainerMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainerMetadata.java index 808e85dd2f5..42564412b1f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainerMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainerMetadata.java @@ -76,6 +76,10 @@ public UnhealthyContainerMetadata(UnhealthyContainers rec, this.keys = keyCount; } + // Default constructor, used by jackson lib for object deserialization. + public UnhealthyContainerMetadata() { + } + public long getContainerID() { return containerID; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index ef40329c80c..eaf08d9ca83 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -61,6 +61,10 @@ public UnhealthyContainersResponse(Collection this.containers = containers; } + // Default constructor, used by jackson lib for object deserialization. + public UnhealthyContainersResponse() { + } + public void setSummaryCount(String state, long count) { if (state.equals(UnHealthyContainerStates.MISSING.toString())) { this.missingCount = count; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java index 7785e01a373..5e6d55ce706 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.fsck; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -24,9 +25,15 @@ import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount; +import org.apache.hadoop.hdds.scm.container.replication.ECContainerReplicaCount; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import java.io.IOException; +import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; + +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -38,31 +45,46 @@ public class ContainerHealthStatus { - private ContainerInfo container; - private int replicaDelta; - private Set healthyReplicas; - private ContainerPlacementStatus placementStatus; - private 
ReconContainerMetadataManager reconContainerMetadataManager; - private int numReplicas; - private long numKeys; + private final ContainerInfo container; + private final int replicaDelta; + private final Set healthyReplicas; + private final Set healthyAvailReplicas; + private final ContainerPlacementStatus placementStatus; + private final ReconContainerMetadataManager reconContainerMetadataManager; + private final int numReplicas; + private final long numKeys; + private final ContainerReplicaCount containerReplicaCount; ContainerHealthStatus(ContainerInfo container, - Set healthyReplicas, + Set replicas, PlacementPolicy placementPolicy, ReconContainerMetadataManager - reconContainerMetadataManager) { + reconContainerMetadataManager, + OzoneConfiguration conf) { this.reconContainerMetadataManager = reconContainerMetadataManager; this.container = container; int repFactor = container.getReplicationConfig().getRequiredNodes(); - this.healthyReplicas = healthyReplicas + this.healthyReplicas = replicas .stream() .filter(r -> !r.getState() .equals((ContainerReplicaProto.State.UNHEALTHY))) .collect(Collectors.toSet()); - this.replicaDelta = repFactor - this.healthyReplicas.size(); + this.healthyAvailReplicas = replicas + .stream() + // Filter unhealthy replicas and + // replicas belonging to out-of-service nodes. + .filter(r -> + (!r.getDatanodeDetails().isDecommissioned() && + !r.getDatanodeDetails().isMaintenance() && + !r.getState().equals(ContainerReplicaProto.State.UNHEALTHY))) + .collect(Collectors.toSet()); + this.replicaDelta = repFactor - this.healthyAvailReplicas.size(); this.placementStatus = getPlacementStatus(placementPolicy, repFactor); - this.numReplicas = healthyReplicas.size(); + this.numReplicas = replicas.size(); this.numKeys = getContainerKeyCount(container.getContainerID()); + + this.containerReplicaCount = + getContainerReplicaCountInstance(conf, replicas); } public long getContainerID() { @@ -78,6 +100,14 @@ public int getReplicationFactor() { } public boolean isHealthy() { + return containerReplicaCount.isHealthy(); + } + + public boolean isSufficientlyReplicated() { + return containerReplicaCount.isSufficientlyReplicated(); + } + + public boolean isHealthilyReplicated() { return replicaDelta == 0 && !isMisReplicated(); } @@ -87,11 +117,11 @@ public boolean isDeleted() { } public boolean isOverReplicated() { - return replicaDelta < 0; + return containerReplicaCount.isOverReplicated(); } public boolean isUnderReplicated() { - return !isMissing() && replicaDelta > 0; + return !isMissing() && !containerReplicaCount.isSufficientlyReplicated(); } public int replicaDelta() { @@ -99,7 +129,7 @@ public int replicaDelta() { } public int getReplicaCount() { - return healthyReplicas.size(); + return healthyAvailReplicas.size(); } public boolean isMisReplicated() { @@ -150,4 +180,21 @@ private long getContainerKeyCount(long containerID) { public long getNumKeys() { return numKeys; } + + private ContainerReplicaCount getContainerReplicaCountInstance( + OzoneConfiguration conf, Set replicas) { + ReplicationManager.ReplicationManagerConfiguration rmConf = conf.getObject( + ReplicationManager.ReplicationManagerConfiguration.class); + boolean isEC = container.getReplicationConfig() + .getReplicationType() == HddsProtos.ReplicationType.EC; + return isEC ? + new ECContainerReplicaCount(container, + replicas, new ArrayList<>(), + rmConf.getMaintenanceRemainingRedundancy()) : + // This class ignores unhealthy replicas, + // therefore set 'considerUnhealthy' to false. 
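The new healthyAvailReplicas set excludes UNHEALTHY replicas as well as replicas hosted on decommissioned or maintenance datanodes, and the replica delta is now computed against that filtered set. Below is a stand-alone sketch of that filter with simplified stand-in types; the real code operates on ContainerReplica and DatanodeDetails, so the names here are illustrative only.

```java
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

/** Illustrative only: mirrors the "skip UNHEALTHY and out-of-service replicas" filter. */
final class ReplicaFilterSketch {

  enum NodeState { IN_SERVICE, DECOMMISSIONED, IN_MAINTENANCE }
  enum ReplicaState { CLOSED, UNHEALTHY }

  static final class Replica {
    final NodeState nodeState;
    final ReplicaState state;
    Replica(NodeState nodeState, ReplicaState state) {
      this.nodeState = nodeState;
      this.state = state;
    }
  }

  static Set<Replica> healthyAvailable(Set<Replica> replicas) {
    // Replicas that are UNHEALTHY, or hosted on decommissioned/maintenance
    // nodes, do not count towards the replication target.
    return replicas.stream()
        .filter(r -> r.state != ReplicaState.UNHEALTHY)
        .filter(r -> r.nodeState == NodeState.IN_SERVICE)
        .collect(Collectors.toSet());
  }

  public static void main(String[] args) {
    Set<Replica> replicas = new HashSet<>();
    replicas.add(new Replica(NodeState.IN_SERVICE, ReplicaState.CLOSED));
    replicas.add(new Replica(NodeState.DECOMMISSIONED, ReplicaState.CLOSED));
    replicas.add(new Replica(NodeState.IN_SERVICE, ReplicaState.UNHEALTHY));
    System.out.println(healthyAvailable(replicas).size()); // prints 1
  }
}
```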
+ new RatisContainerReplicaCount(container, + replicas, new ArrayList<>(), + rmConf.getMaintenanceReplicaMinimum(), false); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 4296dca366a..577fb7d2bcc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -29,6 +29,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -68,17 +69,20 @@ public class ContainerHealthTask extends ReconScmTask { LoggerFactory.getLogger(ContainerHealthTask.class); public static final int FETCH_COUNT = Integer.parseInt(DEFAULT_FETCH_COUNT); - private ReadWriteLock lock = new ReentrantReadWriteLock(true); + private final ReadWriteLock lock = new ReentrantReadWriteLock(true); - private StorageContainerServiceProvider scmClient; - private ContainerManager containerManager; - private ContainerHealthSchemaManager containerHealthSchemaManager; - private ReconContainerMetadataManager reconContainerMetadataManager; - private PlacementPolicy placementPolicy; + private final StorageContainerServiceProvider scmClient; + private final ContainerManager containerManager; + private final ContainerHealthSchemaManager containerHealthSchemaManager; + private final ReconContainerMetadataManager reconContainerMetadataManager; + private final PlacementPolicy placementPolicy; private final long interval; - private Set processedContainers = new HashSet<>(); + private final Set processedContainers = new HashSet<>(); + private final OzoneConfiguration conf; + + @SuppressWarnings("checkstyle:ParameterNumber") public ContainerHealthTask( ContainerManager containerManager, StorageContainerServiceProvider scmClient, @@ -86,13 +90,15 @@ public ContainerHealthTask( ContainerHealthSchemaManager containerHealthSchemaManager, PlacementPolicy placementPolicy, ReconTaskConfig reconTaskConfig, - ReconContainerMetadataManager reconContainerMetadataManager) { + ReconContainerMetadataManager reconContainerMetadataManager, + OzoneConfiguration conf) { super(reconTaskStatusDao); this.scmClient = scmClient; this.containerHealthSchemaManager = containerHealthSchemaManager; this.reconContainerMetadataManager = reconContainerMetadataManager; this.placementPolicy = placementPolicy; this.containerManager = containerManager; + this.conf = conf; interval = reconTaskConfig.getMissingContainerTaskInterval().toMillis(); } @@ -220,7 +226,7 @@ private ContainerHealthStatus setCurrentContainer(long recordId) Set replicas = containerManager.getContainerReplicas(container.containerID()); return new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, conf); } private void completeProcessingContainer( @@ -312,8 +318,9 @@ private void processContainer(ContainerInfo container, long currentTime, Set containerReplicas = containerManager.getContainerReplicas(container.containerID()); ContainerHealthStatus h = new ContainerHealthStatus(container, - containerReplicas, placementPolicy, reconContainerMetadataManager); - if 
(h.isHealthy() || h.isDeleted()) { + containerReplicas, placementPolicy, + reconContainerMetadataManager, conf); + if (h.isHealthilyReplicated() || h.isDeleted()) { return; } // For containers deleted in SCM, we sync the container state here. @@ -426,7 +433,7 @@ public static List generateUnhealthyRecords( Map> unhealthyContainerStateStatsMap) { List records = new ArrayList<>(); - if (container.isHealthy() || container.isDeleted()) { + if (container.isHealthilyReplicated() || container.isDeleted()) { return records; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java index b885ea5adae..2f3de1debcd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -233,7 +233,7 @@ private void updateRootEntitySize( }); } - @NotNull + @Nonnull private static List initializeEntityMinMaxCount( EntityReadAccessHeatMapResponse entity) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHistory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHistory.java index 9a0dccdc99a..32b479a19ea 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHistory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHistory.java @@ -30,7 +30,7 @@ public class ContainerHistory implements Serializable { private String datanodeHost; private long firstSeenTime; private long lastSeenTime; - private long bcsId; + private long lastBcsId; private String state; public ContainerHistory(long containerId, String datanodeUuid, @@ -41,12 +41,16 @@ public ContainerHistory(long containerId, String datanodeUuid, this.datanodeHost = datanodeHost; this.firstSeenTime = firstSeenTime; this.lastSeenTime = lastSeenTime; - this.bcsId = lastBcsId; + this.lastBcsId = lastBcsId; this.state = state; } + // Default constructor, used by jackson lib for object deserialization. + public ContainerHistory() { + } + public long getLastBcsId() { - return bcsId; + return lastBcsId; } public long getContainerId() { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 2db85861615..ab919b7d971 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -220,12 +220,11 @@ public void onMessage(CommandForDatanode commandForDatanode, * Send heartbeat to indicate the datanode is alive and doing well. * * @param datanodeDetails - DatanodeDetailsProto. - * @param layoutInfo - Layout Version Proto * @return SCMheartbeat response. 
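Several Recon beans in this patch gain explicit no-argument constructors, and ContainerHistory renames its bcsId field to lastBcsId so it lines up with getLastBcsId; per the added comments, both changes appear to serve Jackson-style deserialization, which instantiates a bean reflectively and maps JSON properties onto matching getters/setters. A hedged sketch of that round-trip follows; the property names mirror the bean above but are not taken from any Recon API contract, and the setters are added here only so the example is self-contained.

```java
import com.fasterxml.jackson.databind.ObjectMapper;

/** Illustrative only: why the no-arg constructor and the field/getter name alignment matter. */
public class ContainerHistorySketch {

  private long containerId;
  private long lastBcsId;

  // Jackson instantiates the bean through this constructor and then
  // populates the properties it can resolve.
  public ContainerHistorySketch() {
  }

  public long getContainerId() { return containerId; }
  public void setContainerId(long containerId) { this.containerId = containerId; }

  public long getLastBcsId() { return lastBcsId; }
  public void setLastBcsId(long lastBcsId) { this.lastBcsId = lastBcsId; }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    String json = "{\"containerId\":1,\"lastBcsId\":42}";
    ContainerHistorySketch history = mapper.readValue(json, ContainerHistorySketch.class);
    System.out.println(history.getLastBcsId()); // prints 42
  }
}
```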
*/ @Override public List processHeartbeat(DatanodeDetails datanodeDetails, - LayoutVersionProto layoutInfo, CommandQueueReportProto queueReport) { + CommandQueueReportProto queueReport) { List cmds = new ArrayList<>(); long currentTime = Time.now(); if (needUpdate(datanodeDetails, currentTime)) { @@ -237,8 +236,7 @@ public List processHeartbeat(DatanodeDetails datanodeDetails, } // Update heartbeat map with current time datanodeHeartbeatMap.put(datanodeDetails.getUuid(), Time.now()); - cmds.addAll(super.processHeartbeat(datanodeDetails, - layoutInfo, queueReport)); + cmds.addAll(super.processHeartbeat(datanodeDetails, queueReport)); return cmds.stream() .filter(c -> ALLOWED_COMMANDS.contains(c.getType())) .collect(toList()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index 556c6194192..046662398f1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -268,7 +268,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, ContainerHealthTask containerHealthTask = new ContainerHealthTask( containerManager, scmServiceProvider, reconTaskStatusDao, containerHealthSchemaManager, containerPlacementPolicy, reconTaskConfig, - reconContainerMetadataManager); + reconContainerMetadataManager, conf); this.containerSizeCountTask = new ContainerSizeCountTask( containerManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java index 63d5f99d0ea..46b75e45fad 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -483,7 +483,7 @@ public Map getContainers(int limit, return containers; } - @NotNull + @Nonnull private List getPipelines(ContainerKeyPrefix containerKeyPrefix) throws IOException { OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(BucketLayout.LEGACY) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java index 6800604248b..f295f4d1ff0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java @@ -203,7 +203,7 @@ protected void configure() { } if (withReconSqlDb) { - reconSqlDB = new AbstractReconSqlDBTest(); + reconSqlDB = new AbstractReconSqlDBTest(tmpDir.toPath()); modules.addAll(reconSqlDB.getReconSqlDBModules()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index b34c8d31c6f..f49826e67d8 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -23,8 +23,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -41,8 +41,8 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.net.URL; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -206,9 +206,8 @@ public void testNextClosestPowerIndexOfTwo() { assertNextClosestPowerIndexOfTwo(n - 1); } - final Random random = new Random(); for (int i = 0; i < 10; i++) { - assertNextClosestPowerIndexOfTwo(random.nextLong()); + assertNextClosestPowerIndexOfTwo(RandomUtils.nextLong()); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 6733bce59b4..854ac74bd39 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -103,8 +103,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -930,12 +930,9 @@ public void testUnhealthyContainersFilteredResponse() @Test public void testUnhealthyContainersInvalidState() { - try { - containerEndpoint.getUnhealthyContainers("invalid", 1000, 1); - fail("Expected exception to be raised"); - } catch (WebApplicationException e) { - assertEquals("HTTP 400 Bad Request", e.getMessage()); - } + WebApplicationException e = assertThrows(WebApplicationException.class, + () -> containerEndpoint.getUnhealthyContainers("invalid", 1000, 1)); + assertEquals("HTTP 400 Bad Request", e.getMessage()); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java index db6693ea534..08d1a73dc05 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java @@ -74,6 +74,10 @@ public class TestContainerStateCounts extends AbstractReconSqlDBTest { private static final int NUM_DELETED_CONTAINERS = 4; private static final int NUM_CLOSED_CONTAINERS = 3; + public 
TestContainerStateCounts() { + super(); + } + @BeforeEach public void setUp() throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 6bd0cba4df1..05d9927d6c9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -88,7 +88,6 @@ import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountTask; import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.ozone.test.LambdaTestUtils; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; @@ -119,10 +118,10 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -189,6 +188,10 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private PipelineManager pipelineManager; private ReconPipelineManager reconPipelineManager; + public TestEndpoints() { + super(); + } + private void initializeInjector() throws Exception { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( @@ -241,7 +244,7 @@ private void initializeInjector() throws Exception { when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class), anyString(), anyBoolean())).thenReturn(urlConnectionMock); when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class), - anyString())).thenReturn(GenericTestUtils.getRandomizedTestDir()); + anyString())).thenReturn(temporaryFolder.resolve("reconDbDir").toFile()); when(reconUtilsMock.getReconNodeDetails( any(OzoneConfiguration.class))).thenReturn( commonUtils.getReconNodeDetails()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 2c6253cc781..d89fdd6660e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -92,6 +92,10 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private OzoneConfiguration ozoneConfiguration; private Set generatedIds = new HashSet<>(); + public TestOmDBInsightEndPoint() { + super(); + } + private long generateUniqueRandomLong() { long newValue; do { @@ -660,7 +664,7 @@ public void testGetDeletedDirInfoLimitParam() throws Exception { assertNotNull(keyInsightInfoResp); assertEquals(2, keyInsightInfoResp.getDeletedDirInfoList().size()); - assertEquals("/sampleVol/bucketOne/dir_one", + assertEquals("dir_one", 
keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); } @@ -692,7 +696,7 @@ public void testGetDeletedDirInfoPrevKeyParam() throws Exception { assertNotNull(keyInsightInfoResp); assertEquals(2, keyInsightInfoResp.getDeletedDirInfoList().size()); - assertEquals("/sampleVol/bucketOne/dir_three", + assertEquals("dir_three", keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); assertEquals("/sampleVol/bucketOne/dir_two", keyInsightInfoResp.getLastKey()); @@ -725,7 +729,9 @@ public void testGetDeletedDirInfo() throws Exception { assertNotNull(keyInsightInfoResp); assertEquals(3, keyInsightInfoResp.getDeletedDirInfoList().size()); - assertEquals("/sampleVol/bucketOne/dir_one", + assertEquals("sampleVol/bucketOne/dir_one", keyInsightInfoResp + .getDeletedDirInfoList().get(0).getPath()); + assertEquals("dir_one", keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); assertEquals("/sampleVol/bucketOne/dir_two", keyInsightInfoResp.getLastKey()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index eee7f44cd94..8a9452a8629 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -57,7 +57,6 @@ import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -70,9 +69,9 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -197,7 +196,7 @@ private void initializeInjector() throws Exception { when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class), anyString(), anyBoolean())).thenReturn(urlConnectionMock); when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class), - anyString())).thenReturn(GenericTestUtils.getRandomizedTestDir()); + anyString())).thenReturn(temporaryFolder.resolve("reconDbDir").toFile()); when(reconUtilsMock.getReconNodeDetails( any(OzoneConfiguration.class))).thenReturn( commonUtils.getReconNodeDetails()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java index 5c071d353b5..741dcf3be4c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java @@ -39,6 +39,10 @@ public class TestTaskStatusService extends AbstractReconSqlDBTest { private TaskStatusService taskStatusService; + 
public TestTaskStatusService() { + super(); + } + @BeforeEach public void setUp() { Injector parentInjector = getInjector(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java index fc6b6555ecd..9bf824e4eea 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java @@ -59,9 +59,9 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java index 1d1687869ac..d404a168c74 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.fsck; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -29,14 +30,18 @@ import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import java.util.HashSet; import java.util.Set; +import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -49,15 +54,28 @@ public class TestContainerHealthStatus { private PlacementPolicy placementPolicy; private ContainerInfo container; private ReconContainerMetadataManager reconContainerMetadataManager; + private static final OzoneConfiguration CONF = new OzoneConfiguration(); + + private static Stream outOfServiceNodeStates() { + return Stream.of( + Arguments.of(HddsProtos.NodeOperationalState.DECOMMISSIONING), + Arguments.of(HddsProtos.NodeOperationalState.DECOMMISSIONED), + Arguments.of(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE), + Arguments.of(HddsProtos.NodeOperationalState.IN_MAINTENANCE) + ); + } 
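The out-of-service scenarios added to TestContainerHealthStatus are driven by a @MethodSource factory, so the same test body runs once per node operational state. A minimal sketch of that wiring, using a hypothetical enum in place of HddsProtos.NodeOperationalState:

```java
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

class MethodSourceSketchTest {

  /** Hypothetical stand-in for HddsProtos.NodeOperationalState. */
  enum NodeState { DECOMMISSIONING, DECOMMISSIONED, ENTERING_MAINTENANCE, IN_MAINTENANCE }

  private static Stream<Arguments> outOfServiceNodeStates() {
    return Stream.of(
        Arguments.of(NodeState.DECOMMISSIONING),
        Arguments.of(NodeState.DECOMMISSIONED),
        Arguments.of(NodeState.ENTERING_MAINTENANCE),
        Arguments.of(NodeState.IN_MAINTENANCE));
  }

  // JUnit invokes this once per Arguments row produced by the factory above.
  @ParameterizedTest
  @MethodSource("outOfServiceNodeStates")
  void stateIsOutOfService(NodeState state) {
    assertTrue(state.name().contains("MAINTENANCE")
        || state.name().startsWith("DECOMMISSION"));
  }
}
```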
@BeforeEach public void setup() { placementPolicy = mock(PlacementPolicy.class); container = mock(ContainerInfo.class); reconContainerMetadataManager = mock(ReconContainerMetadataManager.class); + when(container.getReplicationFactor()) + .thenReturn(HddsProtos.ReplicationFactor.THREE); when(container.getReplicationConfig()) .thenReturn(RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)); + when(container.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(container.containerID()).thenReturn(ContainerID.valueOf(123456)); when(container.getContainerID()).thenReturn((long)123456); when(placementPolicy.validateContainerPlacement( @@ -73,8 +91,8 @@ public void testHealthyContainer() { ContainerReplicaProto.State.CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertTrue(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthilyReplicated()); assertFalse(status.isOverReplicated()); assertFalse(status.isUnderReplicated()); assertEquals(0, status.replicaDelta()); @@ -97,8 +115,8 @@ public void testHealthyContainerWithExtraUnhealthyReplica() { ContainerReplicaProto.State.UNHEALTHY); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertTrue(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthilyReplicated()); assertFalse(status.isOverReplicated()); assertFalse(status.isUnderReplicated()); assertEquals(0, status.replicaDelta()); @@ -112,8 +130,8 @@ public void testMissingContainer() { Set replicas = new HashSet<>(); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertFalse(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertFalse(status.isHealthilyReplicated()); assertFalse(status.isOverReplicated()); assertFalse(status.isUnderReplicated()); assertEquals(3, status.replicaDelta()); @@ -128,8 +146,8 @@ public void testUnderReplicatedContainer() { ContainerReplicaProto.State.CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertFalse(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertFalse(status.isHealthilyReplicated()); assertFalse(status.isMissing()); assertFalse(status.isOverReplicated()); assertTrue(status.isUnderReplicated()); @@ -147,8 +165,8 @@ public void testOverReplicatedContainer() { ContainerReplicaProto.State.CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertFalse(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertFalse(status.isHealthilyReplicated()); assertFalse(status.isMissing()); assertFalse(status.isUnderReplicated()); assertTrue(status.isOverReplicated()); @@ -157,6 +175,185 @@ public void testOverReplicatedContainer() { assertEquals(0, status.misReplicatedDelta()); } + /** + * Starting with a ContainerHealthStatus of 1 over-replicated container + * replica and then updating a datanode to one of the out-of-service states. + * Replicas belonging to out-of-service nodes should be ignored and + * the container should be considered properly replicated. 
+ */ + @ParameterizedTest + @MethodSource("outOfServiceNodeStates") + public void testOverReplicationWithOutOfServiceNodes( + HddsProtos.NodeOperationalState state) { + Set replicas = generateReplicas(container, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED); + ContainerHealthStatus status = + new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertFalse(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertTrue(status.isOverReplicated()); + + for (ContainerReplica replica : replicas) { + replicas.remove(replica); + replica.getDatanodeDetails().setPersistedOpState(state); + replicas.add(replica); + break; + } + + status = new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + } + + /** + * Nodes in Decommission aren't expected to come back. + * If 1/3 nodes goes into decommission, the container is + * considered under-replicated. If 1/3 nodes goes into maintenance, + * because the node is expected to come back and there are + * 2 available replicas (minimum required num for Ratis THREE) + * the container isn't considered under-replicated. + */ + @ParameterizedTest + @MethodSource("outOfServiceNodeStates") + public void testUnderReplicationWithOutOfServiceNodes( + HddsProtos.NodeOperationalState state) { + Set replicas = generateReplicas(container, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED); + // IN_SERVICE, IN_SERVICE, IN_SERVICE + ContainerHealthStatus status = + new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthy()); + assertTrue(status.isSufficientlyReplicated()); + assertTrue(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + + for (ContainerReplica replica : replicas) { + replicas.remove(replica); + replica.getDatanodeDetails().setPersistedOpState(state); + replicas.add(replica); + break; + } + + // IN_SERVICE, IN_SERVICE, DECOMMISSION/MAINTENANCE + status = new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthy()); + assertFalse(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + + if (state.equals(HddsProtos.NodeOperationalState.DECOMMISSIONING) || + state.equals(HddsProtos.NodeOperationalState.DECOMMISSIONED)) { + assertFalse(status.isSufficientlyReplicated()); + assertTrue(status.isUnderReplicated()); + } else { + assertTrue(status.isSufficientlyReplicated()); + assertFalse(status.isUnderReplicated()); + } + } + + /** + * Starting with a healthy ContainerHealthStatus and then updating + * a datanode to a maintenance state. 
+ * Any node in maintenance is expected to come back and since 2 replicas + * in online nodes are meeting the minimum requirement for + * proper replication, no additional replica-copy is made. + * + * IN_SERVICE, IN_SERVICE, IN_MAINTENANCE + * + * If 1 more node goes into maintenance, then 1 replica copy is made to + * maintain the minimum requirement for proper replication. + * + * IN_SERVICE, IN_SERVICE, IN_MAINTENANCE, IN_MAINTENANCE + * + * Before the copy is made we have + * + * IN_SERVICE, IN_MAINTENANCE, ENTERING_MAINTENANCE + * + * for that short time, the container is under-replicated. + * + * When the copy is made, the container is again considered + * sufficiently replicated. + */ + @Test + public void testReplicationWithNodesInMaintenance() { + Set replicas = generateReplicas(container, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED, + ContainerReplicaProto.State.CLOSED); + // IN_SERVICE, IN_SERVICE, IN_SERVICE + ContainerHealthStatus status = + new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthy()); + assertTrue(status.isSufficientlyReplicated()); + assertTrue(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + + // 1/3 replicas goes into maintenance + // IN_SERVICE, IN_SERVICE, IN_MAINTENANCE + for (ContainerReplica replica : replicas) { + replicas.remove(replica); + replica.getDatanodeDetails().setPersistedOpState( + HddsProtos.NodeOperationalState.IN_MAINTENANCE); + replicas.add(replica); + break; + } + + status = new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthy()); + assertTrue(status.isSufficientlyReplicated()); + assertFalse(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertFalse(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + + // IN_SERVICE, IN_MAINTENANCE, ENTERING_MAINTENANCE + for (ContainerReplica replica : replicas) { + if (replica.getDatanodeDetails().getPersistedOpState().equals( + HddsProtos.NodeOperationalState.IN_SERVICE)) { + replicas.remove(replica); + replica.getDatanodeDetails().setPersistedOpState( + HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE); + replicas.add(replica); + break; + } + } + + // Container should be under-replicated. 
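As the comments above describe, a Ratis THREE container tolerates replicas sitting on maintenance nodes as long as the in-service copies stay at or above the maintenance minimum (assumed here to be 2), while decommissioned nodes simply stop counting. The worked sketch below reproduces that arithmetic; it is a simplification of what RatisContainerReplicaCount does, not a copy of it.

```java
/**
 * Illustrative only: a simplified "sufficiently replicated" decision matching
 * the scenarios in the test comments above; maintenanceReplicaMinimum is
 * assumed to be 2 for a Ratis THREE container.
 */
final class ReplicaSufficiencySketch {

  static boolean sufficientlyReplicated(int inService, int inMaintenance,
      int replicationFactor, int maintenanceReplicaMinimum) {
    // Maintenance nodes are expected to return, so their replicas still count
    // towards the replication factor, but at least maintenanceReplicaMinimum
    // copies must remain on in-service nodes in the meantime.
    boolean enoughOverall = inService + inMaintenance >= replicationFactor;
    boolean enoughOnline = inMaintenance == 0 || inService >= maintenanceReplicaMinimum;
    return enoughOverall && enoughOnline;
  }

  public static void main(String[] args) {
    // IN_SERVICE, IN_SERVICE, IN_SERVICE                -> true
    System.out.println(sufficientlyReplicated(3, 0, 3, 2));
    // IN_SERVICE, IN_SERVICE, IN_MAINTENANCE            -> true
    System.out.println(sufficientlyReplicated(2, 1, 3, 2));
    // IN_SERVICE, IN_MAINTENANCE, ENTERING_MAINTENANCE  -> false (under-replicated)
    System.out.println(sufficientlyReplicated(1, 2, 3, 2));
    // IN_SERVICE, IN_SERVICE, DECOMMISSIONED            -> false (under-replicated)
    System.out.println(sufficientlyReplicated(2, 0, 3, 2));
  }
}
```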
+ status = new ContainerHealthStatus(container, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + assertTrue(status.isHealthy()); + assertFalse(status.isSufficientlyReplicated()); + assertFalse(status.isHealthilyReplicated()); + assertFalse(status.isMissing()); + assertTrue(status.isUnderReplicated()); + assertFalse(status.isMisReplicated()); + assertFalse(status.isOverReplicated()); + } + @Test public void testMisReplicated() { Set replicas = generateReplicas(container, @@ -168,8 +365,8 @@ public void testMisReplicated() { .thenReturn(new ContainerPlacementStatusDefault(1, 2, 5)); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); - assertFalse(status.isHealthy()); + reconContainerMetadataManager, CONF); + assertFalse(status.isHealthilyReplicated()); assertFalse(status.isMissing()); assertFalse(status.isUnderReplicated()); assertFalse(status.isOverReplicated()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 358799cc033..371fb6f9d67 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -22,8 +22,8 @@ import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -38,6 +38,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -69,6 +70,10 @@ */ public class TestContainerHealthTask extends AbstractReconSqlDBTest { + public TestContainerHealthTask() { + super(); + } + @SuppressWarnings("checkstyle:methodlength") @Test public void testRun() throws Exception { @@ -152,7 +157,8 @@ public void testRun() throws Exception { new ContainerHealthTask(scmMock.getContainerManager(), scmMock.getScmServiceProvider(), reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, reconContainerMetadataManager); + placementMock, reconTaskConfig, + reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); LambdaTestUtils.await(60000, 1000, () -> (unHealthyContainersTableHandle.count() == 6)); @@ -316,7 +322,8 @@ public void testDeletedContainer() throws Exception { new ContainerHealthTask(scmMock.getContainerManager(), scmMock.getScmServiceProvider(), reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, reconContainerMetadataManager); + placementMock, reconTaskConfig, + reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); LambdaTestUtils.await(6000, 1000, () -> (unHealthyContainersTableHandle.count() == 2)); @@ -358,6 +365,9 @@ private List getMockContainers(int num) { 
when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE)); + when(c.getReplicationFactor()) + .thenReturn(HddsProtos.ReplicationFactor.THREE); + when(c.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(c.containerID()).thenReturn(ContainerID.valueOf(i)); containers.add(c); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index dd2d4b42117..7d55e612bad 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.fsck; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -48,8 +49,8 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -64,6 +65,7 @@ public class TestContainerHealthTaskRecordGenerator { private ContainerInfo container; private ContainerInfo emptyContainer; private ReconContainerMetadataManager reconContainerMetadataManager; + private static final OzoneConfiguration CONF = new OzoneConfiguration(); @BeforeEach public void setup() throws IOException { @@ -71,14 +73,19 @@ public void setup() throws IOException { container = mock(ContainerInfo.class); emptyContainer = mock(ContainerInfo.class); reconContainerMetadataManager = mock(ReconContainerMetadataManager.class); + when(container.getReplicationFactor()) + .thenReturn(HddsProtos.ReplicationFactor.THREE); when(container.getReplicationConfig()) .thenReturn( RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)); + when(container.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(container.containerID()).thenReturn(ContainerID.valueOf(123456)); when(container.getContainerID()).thenReturn((long)123456); when(reconContainerMetadataManager.getKeyCountForContainer( (long) 123456)).thenReturn(5L); + when(emptyContainer.getReplicationFactor()) + .thenReturn(HddsProtos.ReplicationFactor.THREE); when(emptyContainer.getReplicationConfig()) .thenReturn( RatisReplicationConfig @@ -95,7 +102,7 @@ public void testMissingRecordRetained() { Set replicas = new HashSet<>(); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); // Missing record should be retained assertTrue(ContainerHealthTask.ContainerHealthRecords .retainOrUpdateRecord(status, missingRecord() @@ -114,7 +121,7 @@ public void testMissingRecordRetained() { replicas = generateReplicas(container, CLOSED, CLOSED, CLOSED); status = new ContainerHealthStatus(container, replicas, placementPolicy, - 
reconContainerMetadataManager); + reconContainerMetadataManager, CONF); assertFalse(ContainerHealthTask.ContainerHealthRecords .retainOrUpdateRecord(status, missingRecord() )); @@ -127,7 +134,7 @@ public void testUnderReplicatedRecordRetainedAndUpdated() { generateReplicas(container, CLOSED, CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); UnhealthyContainersRecord rec = underReplicatedRecord(); assertTrue(ContainerHealthTask.ContainerHealthRecords @@ -150,7 +157,7 @@ public void testUnderReplicatedRecordRetainedAndUpdated() { // Container is now replicated OK - should be removed. replicas = generateReplicas(container, CLOSED, CLOSED, CLOSED); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); assertFalse(ContainerHealthTask.ContainerHealthRecords .retainOrUpdateRecord(status, rec)); } @@ -162,7 +169,7 @@ public void testOverReplicatedRecordRetainedAndUpdated() { generateReplicas(container, CLOSED, CLOSED, CLOSED, CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); UnhealthyContainersRecord rec = overReplicatedRecord(); assertTrue(ContainerHealthTask.ContainerHealthRecords @@ -185,7 +192,7 @@ public void testOverReplicatedRecordRetainedAndUpdated() { // Container is now replicated OK - should be removed. replicas = generateReplicas(container, CLOSED, CLOSED, CLOSED); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); assertFalse(ContainerHealthTask.ContainerHealthRecords .retainOrUpdateRecord(status, rec)); } @@ -200,7 +207,7 @@ public void testMisReplicatedRecordRetainedAndUpdated() { .thenReturn(new ContainerPlacementStatusDefault(2, 3, 5)); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); UnhealthyContainersRecord rec = misReplicatedRecord(); assertTrue(ContainerHealthTask.ContainerHealthRecords @@ -226,7 +233,7 @@ public void testMisReplicatedRecordRetainedAndUpdated() { anyList(), anyInt())) .thenReturn(new ContainerPlacementStatusDefault(3, 3, 5)); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); assertFalse(ContainerHealthTask.ContainerHealthRecords .retainOrUpdateRecord(status, rec)); } @@ -243,7 +250,7 @@ public void testCorrectRecordsGenerated() { // HEALTHY container - no records generated. 
ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); List records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 1234567, @@ -273,7 +280,7 @@ public void testCorrectRecordsGenerated() { generateReplicas(container, CLOSED, CLOSED, CLOSED, CLOSED, CLOSED); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 1234567, unhealthyContainerStateStatsMap); @@ -311,7 +318,7 @@ public void testCorrectRecordsGenerated() { .thenReturn(new ContainerPlacementStatusDefault(1, 2, 5)); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 1234567, unhealthyContainerStateStatsMap); @@ -359,7 +366,7 @@ public void testCorrectRecordsGenerated() { .thenReturn(new ContainerPlacementStatusDefault(1, 2, 5)); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 1234567, unhealthyContainerStateStatsMap); @@ -388,7 +395,7 @@ public void testCorrectRecordsGenerated() { status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 345678, unhealthyContainerStateStatsMap); @@ -432,7 +439,7 @@ public void testRecordNotGeneratedIfAlreadyExists() { container, CLOSED, CLOSED, CLOSED, CLOSED, CLOSED); ContainerHealthStatus status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); List records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, existingRec, (long) 1234567, @@ -460,7 +467,7 @@ public void testRecordNotGeneratedIfAlreadyExists() { // Missing replicas.clear(); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, existingRec, (long) 1234567, unhealthyContainerStateStatsMap); @@ -490,7 +497,7 @@ public void testRecordNotGeneratedIfAlreadyExists() { anyList(), anyInt())) .thenReturn(new ContainerPlacementStatusDefault(1, 2, 5)); status = new ContainerHealthStatus(container, replicas, placementPolicy, - reconContainerMetadataManager); + reconContainerMetadataManager, CONF); records = ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, existingRec, (long) 1234567, unhealthyContainerStateStatsMap); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java index d30a6232712..d007fbb1cf7 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java @@ -46,21 +46,24 @@ import com.google.inject.Injector; import com.google.inject.Module; import com.google.inject.Provider; +import org.junit.jupiter.api.io.TempDir; +import org.springframework.util.FileSystemUtils; /** * Class that provides a Recon SQL DB with all the tables created, and APIs * to access the DAOs easily. */ public class AbstractReconSqlDBTest { - private Path temporaryFolder; - private Injector injector; private DSLContext dslContext; private Provider configurationProvider; public AbstractReconSqlDBTest() { + } + + public void init(Path temporaryFolder) { try { - temporaryFolder = Files.createTempDirectory("JunitConfig"); + FileSystemUtils.deleteRecursively(temporaryFolder.resolve("Config")); configurationProvider = new DerbyDataSourceConfigurationProvider(Files.createDirectory( temporaryFolder.resolve("Config")).toFile()); @@ -69,12 +72,17 @@ public AbstractReconSqlDBTest() { } } + public AbstractReconSqlDBTest(Path temporaryFolder) { + init(temporaryFolder); + } + protected AbstractReconSqlDBTest(Provider provider) { configurationProvider = provider; } @BeforeEach - public void createReconSchemaForTest() throws IOException { + public void createReconSchemaForTest(@TempDir Path temporaryFolder) throws IOException { + init(temporaryFolder); injector = Guice.createInjector(getReconSqlDBModules()); dslContext = DSL.using(new DefaultConfiguration().set( injector.getInstance(DataSource.class))); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java index 5570484c0f9..1230a9ba543 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java @@ -40,6 +40,10 @@ */ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest { + public TestReconInternalSchemaDefinition() { + super(); + } + @Test public void testSchemaCreated() throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java index b4cf7689572..18053688468 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java @@ -63,7 +63,7 @@ public static Stream parametersSource() throws IOException { public void testSchemaSetup(Provider provider) throws SQLException, IOException { AbstractReconSqlDBTest reconSqlDB = new AbstractReconSqlDBTest(provider); - reconSqlDB.createReconSchemaForTest(); + reconSqlDB.createReconSchemaForTest(temporaryFolder); assertNotNull(reconSqlDB.getInjector()); assertNotNull(reconSqlDB.getConfiguration()); assertNotNull(reconSqlDB.getDslContext()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java index 498faa3d431..4c6ae91998c 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java @@ -33,6 +33,10 @@ */ public class TestSqlSchemaSetup extends AbstractReconSqlDBTest { + public TestSqlSchemaSetup() { + super(); + } + /** * Make sure schema was created correctly. * @throws SQLException diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java index 6407e64bf15..36e7edc5667 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java @@ -40,6 +40,10 @@ */ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest { + public TestStatsSchemaDefinition() { + super(); + } + @Test public void testIfStatsSchemaCreated() throws Exception { Connection connection = getConnection(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java index 1c692b6a488..7745aac9d3f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java @@ -52,6 +52,10 @@ */ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest { + public TestUtilizationSchemaDefinition() { + super(); + } + @Test public void testReconSchemaCreated() throws Exception { Connection connection = getConnection(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index 00e0a56aab0..efde79f9bac 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -23,13 +23,12 @@ import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -56,8 +55,8 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * Test Recon ICR handler. 
@@ -67,7 +66,7 @@ public class TestReconIncrementalContainerReportHandler private HDDSLayoutVersionManager versionManager; @Test - public void testProcessICR() + public void testProcessICR(@TempDir Path scmPath) throws IOException, NodeNotFoundException, TimeoutException { ContainerID containerID = ContainerID.valueOf(100L); @@ -90,9 +89,6 @@ public void testProcessICR() .getExistContainerWithPipelinesInBatch(any( ArrayList.class))).thenReturn(containerWithPipelineList); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 17f2556e59c..99bb482cb51 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -22,13 +22,12 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -138,8 +137,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException { // Upon processing the heartbeat, the illegal command should be filtered out List returnedCmds = - reconNodeManager.processHeartbeat(datanodeDetails, - defaultLayoutVersionProto()); + reconNodeManager.processHeartbeat(datanodeDetails); assertEquals(1, returnedCmds.size()); assertEquals(SCMCommandProto.Type.reregisterCommand, returnedCmds.get(0).getType()); @@ -148,8 +146,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException { datanodeDetails.setPersistedOpState( HddsProtos.NodeOperationalState.DECOMMISSIONED); datanodeDetails.setPersistedOpStateExpiryEpochSec(12345L); - reconNodeManager.processHeartbeat(datanodeDetails, - defaultLayoutVersionProto()); + reconNodeManager.processHeartbeat(datanodeDetails); // Check both persistedOpState and NodeStatus#operationalState assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED, dnDetails.getPersistedOpState()); @@ -238,8 +235,7 @@ public void testDatanodeUpdate() throws IOException { datanodeDetails.setHostName("hostname2"); // Upon processing the heartbeat, the illegal command should be filtered out List returnedCmds = - reconNodeManager.processHeartbeat(datanodeDetails, - defaultLayoutVersionProto()); + reconNodeManager.processHeartbeat(datanodeDetails); assertEquals(1, returnedCmds.size()); assertEquals(SCMCommandProto.Type.reregisterCommand, 
returnedCmds.get(0).getType()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index 497d05c4f80..d15cd6142d3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -35,9 +35,9 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index 9b5dc3bc9bc..eff330a796c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -54,6 +54,10 @@ public class TestContainerSizeCountTask extends AbstractReconSqlDBTest { private ContainerSizeCountTask task; private DSLContext dslContext; + public TestContainerSizeCountTask() { + super(); + } + @BeforeEach public void setUp() { utilizationSchemaDefinition = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java index 2ded437301c..3572f5813ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java @@ -47,8 +47,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.AdditionalAnswers.returnsElementsOf; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.BDDMockito.given; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -60,6 +60,10 @@ public class TestFileSizeCountTask extends AbstractReconSqlDBTest { private FileSizeCountTask fileSizeCountTask; private DSLContext dslContext; + public TestFileSizeCountTask() { + super(); + } + @BeforeEach public void setUp() { fileCountBySizeDao = getDao(FileCountBySizeDao.class); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index dca0da341a2..9676af01574 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -48,7 +48,7 @@ import 
org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -315,7 +315,7 @@ public void testGetValueType() throws IOException { omdbDefinition.getColumnFamily(bucketTable).getValueType()); } - @NotNull + @Nonnull private List getBytesFromOmMetaManager(int getUpdatesSince) throws RocksDBException, IOException { RDBStore rdbStore = (RDBStore) omMetadataManager.getStore(); @@ -336,7 +336,7 @@ private List getBytesFromOmMetaManager(int getUpdatesSince) return writeBatches; } - @NotNull + @Nonnull private OMDBUpdatesHandler captureEvents(List writeBatches) throws RocksDBException { OMDBUpdatesHandler omdbUpdatesHandler = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 9c0193e5020..df014f4276f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -77,6 +77,10 @@ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { private boolean isSetupDone = false; private ReconOMMetadataManager reconOMMetadataManager; + public TestOmTableInsightTask() { + super(); + } + private void initializeInjector() throws IOException { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java index 014bdaf8225..0adb44e87ca 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java @@ -41,7 +41,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java index e6cdae9b85f..b5e82a48a87 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -21,7 +21,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -47,6 
+47,10 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest { private ReconTaskController reconTaskController; private ReconTaskStatusDao reconTaskStatusDao; + public TestReconTaskControllerImpl() { + super(); + } + @BeforeEach public void setUp() { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index 150937f2207..99f99c2d01d 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -28,7 +28,6 @@ UTF-8 true - false diff --git a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java index 892b86eaa7e..c9bb4d6435e 100644 --- a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java +++ b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java @@ -115,7 +115,7 @@ public S3SecretValue getSecret(String kerberosID) throws IOException { return null; } - return new S3SecretValue(kerberosID, s3Secret); + return S3SecretValue.of(kerberosID, s3Secret); } catch (VaultException e) { LOG.error("Failed to read secret", e); throw new IOException("Failed to read secret", e); diff --git a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/auth/AppRoleAuth.java b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/auth/AppRoleAuth.java index 8ea46f5d6bb..32f37d2734c 100644 --- a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/auth/AppRoleAuth.java +++ b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/auth/AppRoleAuth.java @@ -21,7 +21,7 @@ import com.bettercloud.vault.VaultConfig; import com.bettercloud.vault.VaultException; import com.bettercloud.vault.response.AuthResponse; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; /** * Authentication method via app role. 
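
The hunks above replace direct construction of S3SecretValue with the S3SecretValue.of(...) factory, and the same substitution recurs in the test and stub classes that follow. A minimal sketch of the new call shape, assuming only the factory and the getAwsAccessKey()/getAwsSecret() accessors that already appear in this patch; the wrapper class below is illustrative, not part of the change:

import org.apache.hadoop.ozone.om.helpers.S3SecretValue;

public final class S3SecretValueUsageSketch {

  private S3SecretValueUsageSketch() {
  }

  public static void main(String[] args) {
    // Old form (removed by this patch): new S3SecretValue("kerberosId", "awsSecret")
    S3SecretValue secret = S3SecretValue.of("kerberosId", "awsSecret");

    // The accessors used by the S3 gateway endpoints stay the same.
    System.out.println("access key = " + secret.getAwsAccessKey());
    System.out.println("secret present = " + (secret.getAwsSecret() != null));
  }
}
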
diff --git a/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java b/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java index 4700a283cd4..082a7da3f27 100644 --- a/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java +++ b/hadoop-ozone/s3-secret-store/src/test/java/org/apache/hadoop/ozone/s3/remote/vault/TestVaultS3SecretStore.java @@ -89,7 +89,7 @@ public void clean() { @Test public void testReadWrite() throws IOException { SUCCESS_OPERATION_LIMIT.set(2); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret( "id", secret); @@ -101,7 +101,7 @@ public void testReadWrite() throws IOException { public void testReAuth() throws IOException { SUCCESS_OPERATION_LIMIT.set(1); AUTH_OPERATION_PROVIDER.set(1); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret("id", secret); assertEquals(secret, s3SecretStore.getSecret("id")); @@ -112,7 +112,7 @@ public void testReAuth() throws IOException { @Test public void testAuthFail() throws IOException { SUCCESS_OPERATION_LIMIT.set(1); - S3SecretValue secret = new S3SecretValue("id", "value"); + S3SecretValue secret = S3SecretValue.of("id", "value"); s3SecretStore.storeSecret("id", secret); assertThrows(IOException.class, diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 9ad6fbab471..f875047d04a 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -28,7 +28,6 @@ UTF-8 true - false diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/EncodingTypeObject.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/EncodingTypeObject.java index e55eb37a38a..ad1e327c6ed 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/EncodingTypeObject.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/EncodingTypeObject.java @@ -19,7 +19,7 @@ import org.apache.commons.lang3.StringUtils; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; /** * A converter to encode string if needed. 
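
Alongside the functional changes, the files above and below swap javax.annotation and org.jetbrains.annotations imports for their jakarta.annotation equivalents (Nullable, Nonnull); only the import line changes, the annotated signatures are untouched. A small before/after sketch of the pattern, using an illustrative class of its own:

// Annotation migration sketch; the class and method are made up for illustration.
import jakarta.annotation.Nonnull;   // previously: org.jetbrains.annotations.NotNull
import jakarta.annotation.Nullable;  // previously: javax.annotation.Nullable

final class AnnotationMigrationSketch {

  @Nonnull
  String describe(@Nullable String input) {
    // Behaviour is unaffected by the import swap; only the annotation source changes.
    return input == null ? "<none>" : input;
  }
}
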
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 3c932da57d7..9c9ccd227dc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -24,7 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; +import jakarta.annotation.Nullable; import javax.ws.rs.DELETE; import javax.ws.rs.PUT; import javax.ws.rs.Path; @@ -32,6 +32,7 @@ import javax.ws.rs.core.Response; import java.io.IOException; +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -55,15 +56,26 @@ public Response generate(@PathParam("username") String username) return generateInternal(username); } - private Response generateInternal(@Nullable String username) - throws IOException { - S3SecretResponse s3SecretResponse = new S3SecretResponse(); - S3SecretValue s3SecretValue = generateS3Secret(username); - s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret()); - s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey()); - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - S3GAction.GENERATE_SECRET, getAuditParameters())); - return Response.ok(s3SecretResponse).build(); + private Response generateInternal(@Nullable String username) throws IOException { + try { + S3SecretValue s3SecretValue = generateS3Secret(username); + + S3SecretResponse s3SecretResponse = new S3SecretResponse(); + s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret()); + s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey()); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + S3GAction.GENERATE_SECRET, getAuditParameters())); + return Response.ok(s3SecretResponse).build(); + } catch (OMException e) { + AUDIT.logWriteFailure(buildAuditMessageForFailure( + S3GAction.GENERATE_SECRET, getAuditParameters(), e)); + if (e.getResult() == OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS) { + return Response.status(BAD_REQUEST.getStatusCode(), e.getResult().toString()).build(); + } else { + LOG.error("Can't execute get secret request: ", e); + return Response.serverError().build(); + } + } } private S3SecretValue generateS3Secret(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 174af69e255..7515d991eba 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -19,7 +19,7 @@ */ package org.apache.hadoop.ozone.client; -import javax.annotation.Nonnull; +import jakarta.annotation.Nonnull; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -379,7 +379,7 @@ public void cancelDelegationToken(Token token) @Override @Nonnull public S3SecretValue getS3Secret(String kerberosID) throws IOException { - return new S3SecretValue(STUB_KERBEROS_ID, STUB_SECRET); + return S3SecretValue.of(STUB_KERBEROS_ID, STUB_SECRET); } @Override @@ -573,6 +573,14 @@ public void 
setReplicationConfig(String volumeName, String bucketName, } + @Deprecated + @Override + public void setEncryptionKey(String volumeName, String bucketName, + String bekName) + throws IOException { + + } + @Override public OzoneKey headObject(String volumeName, String bucketName, String keyName) throws IOException { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java index 998b5c8cfc9..c42036cb1a3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -131,11 +131,11 @@ public class TestAuthorizationFilter { @SuppressWarnings("checkstyle:ParameterNumber") @ParameterizedTest @MethodSource("testAuthFilterFailuresInput") - public void testAuthFilterFailures( + void testAuthFilterFailures( String method, String authHeader, String contentMd5, String host, String amzContentSha256, String date, String contentType, String path, String expectedErrorMsg - ) { + ) throws Exception { try { ContainerRequestContext context = setupContext(method, authHeader, contentMd5, host, amzContentSha256, date, contentType, path); @@ -169,8 +169,6 @@ public void testAuthFilterFailures( } - } catch (Exception ex) { - fail("Unexpected exception: " + ex); } } @@ -238,58 +236,54 @@ public void testAuthFilterFailures( @SuppressWarnings("checkstyle:ParameterNumber") @ParameterizedTest @MethodSource("testAuthFilterInput") - public void testAuthFilter( + void testAuthFilter( String method, String authHeader, String contentMd5, String host, String amzContentSha256, String date, String contentType, String path - ) { - try { - ContainerRequestContext context = setupContext(method, authHeader, - contentMd5, host, amzContentSha256, date, contentType, path); - - AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); - awsSignatureProcessor.setContext(context); - - SignatureInfo signatureInfo = new SignatureInfo(); - - authorizationFilter.setSignatureParser(awsSignatureProcessor); - authorizationFilter.setSignatureInfo(signatureInfo); - - authorizationFilter.filter(context); - - if (path.startsWith("/secret")) { - assertNull(authorizationFilter.getSignatureInfo().getUnfilteredURI()); - - assertNull(authorizationFilter.getSignatureInfo().getStringToSign()); - } else { - String canonicalRequest = method + "\n" - + path + "\n" - + "\n" - + "host:" + host + "\nx-amz-content-sha256:" + amzContentSha256 + - "\n" - + "x-amz-date:" + DATETIME + "\n" - + "\n" - + "host;x-amz-content-sha256;x-amz-date\n" - + amzContentSha256; - - MessageDigest md = MessageDigest.getInstance("SHA-256"); - md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); - - String expectedStrToSign = "AWS4-HMAC-SHA256\n" - + DATETIME + "\n" - + CURDATE + "/us-east-1/s3/aws4_request\n" - + Hex.encode(md.digest()).toLowerCase(); - - assertEquals(path, - authorizationFilter.getSignatureInfo().getUnfilteredURI(), - "Unfiltered URI is not preserved"); - - assertEquals(expectedStrToSign, - authorizationFilter.getSignatureInfo().getStringToSign(), - "String to sign is invalid"); - } - } catch (Exception ex) { - fail("Unexpected exception: " + ex); + ) throws Exception { + ContainerRequestContext context = setupContext(method, authHeader, + contentMd5, host, amzContentSha256, date, contentType, path); + + AWSSignatureProcessor awsSignatureProcessor = new 
AWSSignatureProcessor(); + awsSignatureProcessor.setContext(context); + + SignatureInfo signatureInfo = new SignatureInfo(); + + authorizationFilter.setSignatureParser(awsSignatureProcessor); + authorizationFilter.setSignatureInfo(signatureInfo); + + authorizationFilter.filter(context); + + if (path.startsWith("/secret")) { + assertNull(authorizationFilter.getSignatureInfo().getUnfilteredURI()); + + assertNull(authorizationFilter.getSignatureInfo().getStringToSign()); + } else { + String canonicalRequest = method + "\n" + + path + "\n" + + "\n" + + "host:" + host + "\nx-amz-content-sha256:" + amzContentSha256 + + "\n" + + "x-amz-date:" + DATETIME + "\n" + + "\n" + + "host;x-amz-content-sha256;x-amz-date\n" + + amzContentSha256; + + MessageDigest md = MessageDigest.getInstance("SHA-256"); + md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8)); + + String expectedStrToSign = "AWS4-HMAC-SHA256\n" + + DATETIME + "\n" + + CURDATE + "/us-east-1/s3/aws4_request\n" + + Hex.encode(md.digest()).toLowerCase(); + + assertEquals(path, + authorizationFilter.getSignatureInfo().getUnfilteredURI(), + "Unfiltered URI is not preserved"); + + assertEquals(expectedStrToSign, + authorizationFilter.getSignatureInfo().getStringToSign(), + "String to sign is invalid"); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java index 93400a710af..500010f63db 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import javax.ws.rs.core.HttpHeaders; @@ -92,7 +92,7 @@ public void testInitiateMultipartUploadWithECKey() throws Exception { assertNotNull(multipartUploadInitiateResponse.getUploadID()); } - @NotNull + @Nonnull private ObjectEndpoint getObjectEndpoint(OzoneClient client, HttpHeaders headers) { ObjectEndpoint rest = new ObjectEndpoint(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 3a3b4743155..eedee2855e7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -42,7 +42,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -181,13 +181,10 @@ public void testMultipartInvalidPartOrderError() throws Exception { CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); 
completeMultipartUploadRequest.setPartList(partsList); - try { - completeMultipartUpload(key, completeMultipartUploadRequest, uploadID); - fail("testMultipartInvalidPartOrderError"); - } catch (OS3Exception ex) { - assertEquals(S3ErrorTable.INVALID_PART_ORDER.getCode(), ex.getCode()); - } - + OS3Exception ex = + assertThrows(OS3Exception.class, + () -> completeMultipartUpload(key, completeMultipartUploadRequest, uploadID)); + assertEquals(S3ErrorTable.INVALID_PART_ORDER.getCode(), ex.getCode()); } @Test @@ -218,12 +215,9 @@ public void testMultipartInvalidPartError() throws Exception { CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest(); completeMultipartUploadRequest.setPartList(partsList); - try { - completeMultipartUpload(key, completeMultipartUploadRequest, uploadID); - fail("testMultipartInvalidPartError"); - } catch (OS3Exception ex) { - assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode()); - } - + OS3Exception ex = + assertThrows(OS3Exception.class, + () -> completeMultipartUpload(key, completeMultipartUploadRequest, uploadID)); + assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index dcaee0921f0..ae8279f2586 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -53,9 +53,9 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index c79b085fd1a..90d490dea0b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -43,7 +43,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -128,17 +128,15 @@ public void testPartUploadWithOverride() throws Exception { @Test public void testPartUploadWithIncorrectUploadID() throws Exception { - try { + OS3Exception ex = assertThrows(OS3Exception.class, () -> { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, "random", body); - fail("testPartUploadWithIncorrectUploadID failed"); - } catch (OS3Exception ex) { - 
assertEquals("NoSuchUpload", ex.getCode()); - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - } + }); + assertEquals("NoSuchUpload", ex.getCode()); + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 210b5ffb488..787aa6e8777 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -39,8 +39,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -131,16 +131,14 @@ public void testPartUploadWithOverride() throws Exception { @Test public void testPartUploadWithIncorrectUploadID() throws Exception { - try { + OS3Exception ex = assertThrows(OS3Exception.class, () -> { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(S3BUCKET, S3KEY, content.length(), 1, "random", body); - fail("testPartUploadWithIncorrectUploadID failed"); - } catch (OS3Exception ex) { - assertEquals("NoSuchUpload", ex.getCode()); - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - } + }); + assertEquals("NoSuchUpload", ex.getCode()); + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 92bcec5c6e8..d891573d5f1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -42,12 +42,12 @@ import java.util.Map; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.isNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 35821f20038..947b0986c8e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ 
-57,7 +57,6 @@ import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.mockito.Mockito.mock; @@ -265,12 +264,9 @@ public void testPutAclFailure() throws Exception { InputStream inputBody = TestBucketAcl.class.getClassLoader() .getResourceAsStream("userAccessControlList.xml"); - try { - bucketEndpoint.put("unknown_bucket", ACL_MARKER, headers, - inputBody); - fail(); - } catch (OS3Exception ex) { + assertThrows(OS3Exception.class, () -> bucketEndpoint.put("unknown_bucket", ACL_MARKER, headers, + inputBody)); } finally { inputBody.close(); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index f3c17d5807e..681452130a7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -22,6 +22,7 @@ import java.security.Principal; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; import javax.ws.rs.core.SecurityContext; import javax.ws.rs.core.UriInfo; @@ -30,6 +31,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -38,15 +40,16 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.jupiter.MockitoExtension; +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.notNull; import static org.mockito.Mockito.when; /** * Test for S3 secret generate endpoint. 
*/ @ExtendWith(MockitoExtension.class) -public class TestSecretGenerate { +class TestSecretGenerate { private static final String USER_NAME = "test"; private static final String OTHER_USER_NAME = "test2"; private static final String USER_SECRET = "test_secret"; @@ -66,12 +69,11 @@ public class TestSecretGenerate { private static S3SecretValue getS3SecretValue(InvocationOnMock invocation) { Object[] args = invocation.getArguments(); - return new S3SecretValue((String) args[0], USER_SECRET); + return S3SecretValue.of((String) args[0], USER_SECRET); } @BeforeEach - void setUp() throws IOException { - when(proxy.getS3Secret(any())).then(TestSecretGenerate::getS3SecretValue); + void setUp() { OzoneConfiguration conf = new OzoneConfiguration(); OzoneClient client = new OzoneClientStub(new ObjectStoreStub(conf, proxy)); @@ -86,21 +88,51 @@ void setUp() throws IOException { @Test void testSecretGenerate() throws IOException { - when(principal.getName()).thenReturn(USER_NAME); - when(securityContext.getUserPrincipal()).thenReturn(principal); - when(context.getSecurityContext()).thenReturn(securityContext); + setupSecurityContext(); + hasNoSecretYet(); S3SecretResponse response = (S3SecretResponse) endpoint.generate().getEntity(); + assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(USER_NAME, response.getAwsAccessKey()); } + @Test + void testIfSecretAlreadyExists() throws IOException { + setupSecurityContext(); + hasSecretAlready(); + + Response response = endpoint.generate(); + + assertEquals(BAD_REQUEST.getStatusCode(), response.getStatus()); + assertEquals(OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS.toString(), + response.getStatusInfo().getReasonPhrase()); + } + @Test void testSecretGenerateWithUsername() throws IOException { + hasNoSecretYet(); + S3SecretResponse response = (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(OTHER_USER_NAME, response.getAwsAccessKey()); } + + private void setupSecurityContext() { + when(principal.getName()).thenReturn(USER_NAME); + when(securityContext.getUserPrincipal()).thenReturn(principal); + when(context.getSecurityContext()).thenReturn(securityContext); + } + + private void hasNoSecretYet() throws IOException { + when(proxy.getS3Secret(notNull())) + .then(TestSecretGenerate::getS3SecretValue); + } + + private void hasSecretAlready() throws IOException { + when(proxy.getS3Secret(notNull())) + .thenThrow(new OMException("Secret already exists", OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS)); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java index a319496419d..b26df0e8996 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java @@ -42,9 +42,9 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.ACCESS_DENIED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_SECRET_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.times; import static 
org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 59348894326..0585fea000c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -106,8 +106,7 @@ public Void call() throws Exception { if (duResponse.get("status").equals("PATH_NOT_FOUND")) { printPathNotFound(); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java index 5a2a2d11c02..f74ee109504 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java @@ -80,8 +80,7 @@ public Void call() throws Exception { } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { printTypeNA("File Size Distribution"); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index 0cd77626d15..0521e8fd744 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.HashSet; -import java.util.Objects; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_DEFAULT; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; @@ -86,86 +85,46 @@ public Class getParentType() { return OzoneAdmin.class; } - public boolean isFileSystemOptimizedBucket(String path) throws IOException { - OFSPath ofsPath = new OFSPath(path, - OzoneConfiguration.of(getOzoneConfig())); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - - try { - OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) - .getBucket(ofsPath.getBucketName()); - - // Resolve the bucket layout in case this is a Link Bucket. - BucketLayout resolvedBucketLayout = - OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, - new HashSet<>()); - - return resolvedBucketLayout.isFileSystemOptimized(); - } catch (IOException e) { - System.out.println( - "Bucket layout couldn't be verified for path: " + ofsPath + - ". 
Exception: " + e); - return false; - } - } - - public boolean isObjectStoreBucket(String path) throws IOException { - OFSPath ofsPath = new OFSPath(path, - OzoneConfiguration.of(getOzoneConfig())); - + private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) { boolean enableFileSystemPaths = getOzoneConfig() .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - try { - OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) - .getBucket(ofsPath.getBucketName()); - // Resolve the bucket layout in case this is a Link Bucket. BucketLayout resolvedBucketLayout = OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>()); - return resolvedBucketLayout.isObjectStore(enableFileSystemPaths); } catch (IOException e) { System.out.println( - "Bucket layout couldn't be verified for path: " + ofsPath + - ". Exception: " + e); + "Bucket layout couldn't be resolved. Exception thrown: " + e); return false; } } /** - * Checking if the bucket is part of the path. + * Checks if bucket is OBS bucket or if bucket is part of the path. * Return false if path is root, just a volume or invalid. + * Returns false if bucket is part of path but not a OBS bucket. * @param path - * @return true if the bucket - * is not part of the given path. + * @return true if bucket is OBS bucket or not part of provided path. * @throws IOException */ - public boolean bucketIsPresentInThePath(String path) throws IOException { + public boolean isNotValidBucketOrOBSBucket(String path) { OFSPath ofsPath = new OFSPath(path, OzoneConfiguration.of(getOzoneConfig())); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - - try { + try (OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig())) { + ObjectStore objectStore = ozoneClient.getObjectStore(); + // Checks if the bucket is part of the path. OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) .getBucket(ofsPath.getBucketName()); - - return Objects.nonNull(bucket); + return isObjectStoreBucket(bucket, objectStore); } catch (IOException e) { System.out.println( "Bucket layout couldn't be verified for path: " + ofsPath + ". 
Exception: " + e); - return false; } + return true; } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java index c3494cf4ffb..113193c929b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java @@ -80,8 +80,7 @@ public Void call() throws Exception { } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { printTypeNA("Quota"); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java index 4a4946bb809..9180274b9c7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java @@ -76,8 +76,7 @@ public Void call() throws Exception { if (summaryResponse.get("status").equals("PATH_NOT_FOUND")) { printPathNotFound(); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index 2c55b4ea4c7..b71dd1c0156 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -67,10 +67,6 @@ public class ChunkKeyHandler extends KeyHandler implements SubcommandWithParent { - private XceiverClientManager xceiverClientManager; - private XceiverClientSpi xceiverClient; - private OzoneManagerProtocol ozoneManagerClient; - @CommandLine.ParentCommand private OzoneDebug parent; @@ -81,11 +77,9 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { - try (ContainerOperationClient containerOperationClient = new - ContainerOperationClient(parent.getOzoneConf())) { - xceiverClientManager = containerOperationClient.getXceiverClientManager(); - ozoneManagerClient = - client.getObjectStore().getClientProxy().getOzoneManagerClient(); + try (ContainerOperationClient containerOperationClient = new ContainerOperationClient(parent.getOzoneConf()); + XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { + OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); address.ensureKeyAddress(); JsonElement element; JsonObject result = new JsonObject(); @@ -127,80 +121,82 @@ protected void execute(OzoneClient client, OzoneAddress address) } else { pipeline = keyPipeline; } - xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline); - // Datanode is queried to get chunk information.Thus querying the - // OM,SCM and datanode helps us get chunk location information - 
ContainerProtos.DatanodeBlockID datanodeBlockID = - keyLocation.getBlockID().getDatanodeBlockIDProtobuf(); - // doing a getBlock on all nodes - Map - responses = null; - Map - readContainerResponses = null; + XceiverClientSpi xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline); try { - responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, - datanodeBlockID, keyLocation.getToken()); - readContainerResponses = - containerOperationClient.readContainerFromAllNodes( - keyLocation.getContainerID(), pipeline); - } catch (InterruptedException e) { - LOG.error("Execution interrupted due to " + e); - Thread.currentThread().interrupt(); - } - JsonArray responseFromAllNodes = new JsonArray(); - for (Map.Entry - entry : responses.entrySet()) { - chunkPaths.clear(); - JsonObject jsonObj = new JsonObject(); - if (entry.getValue() == null) { - LOG.error("Cant execute getBlock on this node"); - continue; - } - tempchunks = entry.getValue().getBlockData().getChunksList(); - ContainerProtos.ContainerDataProto containerData = - readContainerResponses.get(entry.getKey()).getContainerData(); - for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) { - String fileName = containerLayoutVersion.getChunkFile(new File( - getChunkLocationPath(containerData.getContainerPath())), - keyLocation.getBlockID(), - ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); - chunkPaths.add(fileName); - ChunkDetails chunkDetails = new ChunkDetails(); - chunkDetails.setChunkName(fileName); - chunkDetails.setChunkOffset(chunkInfo.getOffset()); - chunkDetailsList.add(chunkDetails); - } - containerChunkInfoVerbose.setContainerPath(containerData - .getContainerPath()); - containerChunkInfoVerbose.setPipeline(keyPipeline); - containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); - containerChunkInfo.setFiles(chunkPaths); - containerChunkInfo.setPipelineID(keyPipeline.getId().getId()); - if (isECKey) { - ChunkType blockChunksType = - isECParityBlock(keyPipeline, entry.getKey()) ? 
- ChunkType.PARITY : ChunkType.DATA; - containerChunkInfoVerbose.setChunkType(blockChunksType); - containerChunkInfo.setChunkType(blockChunksType); + // Datanode is queried to get chunk information.Thus querying the + // OM,SCM and datanode helps us get chunk location information + ContainerProtos.DatanodeBlockID datanodeBlockID = + keyLocation.getBlockID().getDatanodeBlockIDProtobuf(); + // doing a getBlock on all nodes + Map + responses = null; + Map + readContainerResponses = null; + try { + responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, + datanodeBlockID, keyLocation.getToken()); + readContainerResponses = + containerOperationClient.readContainerFromAllNodes( + keyLocation.getContainerID(), pipeline); + } catch (InterruptedException e) { + LOG.error("Execution interrupted due to " + e); + Thread.currentThread().interrupt(); } - Gson gson = new GsonBuilder().create(); - if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); - } else { - element = gson.toJsonTree(containerChunkInfo); + JsonArray responseFromAllNodes = new JsonArray(); + for (Map.Entry + entry : responses.entrySet()) { + chunkPaths.clear(); + JsonObject jsonObj = new JsonObject(); + if (entry.getValue() == null) { + LOG.error("Cant execute getBlock on this node"); + continue; + } + tempchunks = entry.getValue().getBlockData().getChunksList(); + ContainerProtos.ContainerDataProto containerData = + readContainerResponses.get(entry.getKey()).getContainerData(); + for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) { + String fileName = containerLayoutVersion.getChunkFile(new File( + getChunkLocationPath(containerData.getContainerPath())), + keyLocation.getBlockID(), + ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkPaths.add(fileName); + ChunkDetails chunkDetails = new ChunkDetails(); + chunkDetails.setChunkName(fileName); + chunkDetails.setChunkOffset(chunkInfo.getOffset()); + chunkDetailsList.add(chunkDetails); + } + containerChunkInfoVerbose.setContainerPath(containerData + .getContainerPath()); + containerChunkInfoVerbose.setPipeline(keyPipeline); + containerChunkInfoVerbose.setChunkInfos(chunkDetailsList); + containerChunkInfo.setFiles(chunkPaths); + containerChunkInfo.setPipelineID(keyPipeline.getId().getId()); + if (isECKey) { + ChunkType blockChunksType = + isECParityBlock(keyPipeline, entry.getKey()) ? 
+ ChunkType.PARITY : ChunkType.DATA; + containerChunkInfoVerbose.setChunkType(blockChunksType); + containerChunkInfo.setChunkType(blockChunksType); + } + Gson gson = new GsonBuilder().create(); + if (isVerbose()) { + element = gson.toJsonTree(containerChunkInfoVerbose); + } else { + element = gson.toJsonTree(containerChunkInfo); + } + jsonObj.addProperty("Datanode-HostName", entry.getKey() + .getHostName()); + jsonObj.addProperty("Datanode-IP", entry.getKey() + .getIpAddress()); + jsonObj.addProperty("Container-ID", containerId); + jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); + jsonObj.add("Locations", element); + responseFromAllNodes.add(jsonObj); } - jsonObj.addProperty("Datanode-HostName", entry.getKey() - .getHostName()); - jsonObj.addProperty("Datanode-IP", entry.getKey() - .getIpAddress()); - jsonObj.addProperty("Container-ID", containerId); - jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); - jsonObj.add("Locations", element); - responseFromAllNodes.add(jsonObj); + responseArrayList.add(responseFromAllNodes); + } finally { + xceiverClientManager.releaseClientForReadData(xceiverClient, false); } - responseArrayList.add(responseFromAllNodes); - xceiverClientManager.releaseClientForReadData(xceiverClient, false); - xceiverClient = null; } result.add("KeyLocations", responseArrayList); Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 3b2c2efede0..30f2b4eca1f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.keys.KeyHandler; -import org.jetbrains.annotations.NotNull; +import jakarta.annotation.Nonnull; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -239,7 +239,7 @@ private InputStream getReplica( return input != null ? 
input : new ByteArrayInputStream(new byte[0]); } - @NotNull + @Nonnull private File createDirectory(String volumeName, String bucketName, String keyName) throws IOException { String fileSuffix diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index c94048e00d8..60af701e149 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.freon; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.file.NoSuchFileException; import java.time.Duration; @@ -41,20 +42,24 @@ import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import com.codahale.metrics.Timer; import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine.Command; +import picocli.CommandLine.Mixin; import picocli.CommandLine.Option; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static java.util.Collections.emptyMap; + /** * Data generator tool test om performance. 
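Note on the timing idiom used throughout the OmMetadataGenerator changes below: each operation is wrapped in getMetrics().timer(name).time(...), which records the call's latency and rethrows anything the wrapped call throws. A minimal sketch, assuming the Dropwizard metrics-core Timer and MetricRegistry that the Freon base generator exposes (operation name and payload are hypothetical):

import java.util.concurrent.Callable;

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public final class TimedCallExample {

  private TimedCallExample() { }

  // Timer.time(Callable) measures the call and returns its result.
  static <T> T timed(MetricRegistry metrics, String name, Callable<T> op) throws Exception {
    Timer timer = metrics.timer(name);
    return timer.time(op);
  }

  public static void main(String[] args) throws Exception {
    MetricRegistry registry = new MetricRegistry();
    String result = timed(registry, "LOOKUP_KEY", () -> "ok");
    System.out.println(result + ", recorded calls: " + registry.timer("LOOKUP_KEY").getCount());
  }
}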
@@ -70,15 +75,19 @@ public class OmMetadataGenerator extends BaseFreonGenerator enum Operation { CREATE_FILE, + CREATE_STREAM_FILE, LOOKUP_FILE, READ_FILE, LIST_STATUS, + LIST_STATUS_LIGHT, CREATE_KEY, + CREATE_STREAM_KEY, LOOKUP_KEY, GET_KEYINFO, HEAD_KEY, READ_KEY, LIST_KEYS, + LIST_KEYS_LIGHT, INFO_BUCKET, INFO_VOLUME, MIXED, @@ -145,6 +154,9 @@ enum Operation { ) private String omServiceID; + @Mixin + private FreonReplicationOptions replication; + private OzoneManagerProtocol ozoneManagerClient; private ThreadLocal omKeyArgsBuilder; @@ -171,7 +183,7 @@ public Void call() throws Exception { contentGenerator = new ContentGenerator(dataSize.toBytes(), bufferSize); omKeyArgsBuilder = ThreadLocal.withInitial(this::createKeyArgsBuilder); OzoneConfiguration conf = createOzoneConfiguration(); - replicationConfig = ReplicationConfig.getDefault(conf); + replicationConfig = replication.fromParamsOrConfig(conf); try (OzoneClient rpcClient = createOzoneClient(omServiceID, conf)) { ensureVolumeAndBucketExist(rpcClient, volumeName, bucketName); @@ -309,145 +321,113 @@ public Supplier realTimeStatusSupplier() { }; } - @SuppressWarnings("checkstyle:EmptyBlock") private void applyOperation(long counter) throws Exception { OmKeyArgs keyArgs; - String keyName; - long threadSeqId; + final long threadSeqId = getThreadSequenceId(); String startKeyName; if (mixedOperation) { - threadSeqId = getThreadSequenceId(); operation = operations[(int)threadSeqId]; } if (randomOp) { counter = ThreadLocalRandom.current().nextLong(getTestNo()); } + final String keyName = getPath(counter); switch (operation) { case CREATE_KEY: - keyName = getPath(counter); - getMetrics().timer(operation.name()).time(() -> { - try (OutputStream stream = bucket.createStreamKey(keyName, - dataSize.toBytes(), replicationConfig, new HashMap<>())) { - contentGenerator.write(stream); - } - return null; - }); + getMetrics().timer(operation.name()).time(() -> performWriteOperation(() -> + bucket.createKey(keyName, dataSize.toBytes(), replicationConfig, emptyMap()), contentGenerator)); + break; + case CREATE_STREAM_KEY: + getMetrics().timer(operation.name()).time(() -> performWriteOperation(() -> + bucket.createStreamKey(keyName, dataSize.toBytes(), replicationConfig, emptyMap()), contentGenerator)); break; case LOOKUP_KEY: - keyName = getPath(counter); keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build(); - getMetrics().timer(operation.name()).time(() -> { - ozoneManagerClient.lookupKey(keyArgs); - return null; - }); + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.lookupKey(keyArgs)); break; case GET_KEYINFO: - keyName = getPath(counter); keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build(); - getMetrics().timer(operation.name()).time(() -> { - ozoneManagerClient.getKeyInfo(keyArgs, false); - return null; - }); + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.getKeyInfo(keyArgs, false)); break; case HEAD_KEY: - keyName = getPath(counter); keyArgs = omKeyArgsBuilder.get() .setKeyName(keyName).setHeadOp(true).build(); - getMetrics().timer(operation.name()).time(() -> { - ozoneManagerClient.getKeyInfo(keyArgs, false); - return null; - }); + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.getKeyInfo(keyArgs, false)); break; case READ_KEY: - keyName = getPath(counter); - getMetrics().timer(operation.name()).time(() -> { - try (OzoneInputStream stream = bucket.readKey(keyName)) { - while ((stream.read(readBuffer)) >= 0) { - } - } - return null; - }); + 
getMetrics().timer(operation.name()).time(() -> performReadOperation(() -> bucket.readKey(keyName), readBuffer)); break; case READ_FILE: - keyName = getPath(counter); - getMetrics().timer(operation.name()).time(() -> { - try (OzoneInputStream stream = bucket.readFile(keyName)) { - while ((stream.read(readBuffer)) >= 0) { - } - } - return null; - }); + getMetrics().timer(operation.name()).time(() -> performReadOperation(() -> bucket.readFile(keyName), readBuffer)); break; case CREATE_FILE: - keyName = getPath(counter); - getMetrics().timer(operation.name()).time(() -> { - try ( - OutputStream stream = bucket.createFile(keyName, dataSize.toBytes(), - replicationConfig, true, false)) { - contentGenerator.write(stream); - } - return null; - }); + getMetrics().timer(operation.name()).time(() -> performWriteOperation(() -> + bucket.createFile(keyName, dataSize.toBytes(), replicationConfig, true, false), contentGenerator)); + break; + case CREATE_STREAM_FILE: + getMetrics().timer(operation.name()).time(() -> performWriteOperation(() -> + bucket.createStreamFile(keyName, dataSize.toBytes(), replicationConfig, true, false), contentGenerator)); break; case LOOKUP_FILE: - keyName = getPath(counter); keyArgs = omKeyArgsBuilder.get().setKeyName(keyName).build(); + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.lookupFile(keyArgs)); + break; + case LIST_KEYS: + startKeyName = getPath(threadSeqId * batchSize); getMetrics().timer(operation.name()).time(() -> { - ozoneManagerClient.lookupFile(keyArgs); + List keyInfoList = + ozoneManagerClient.listKeys(volumeName, bucketName, startKeyName, "", batchSize).getKeys(); + if (keyInfoList.size() + 1 < batchSize) { + throw new NoSuchFileException("There are not enough keys for testing you should use " + + "CREATE_KEY to create at least batch-size * threads = " + batchSize * getThreadNo()); + } return null; }); break; - case LIST_KEYS: - threadSeqId = getThreadSequenceId(); + case LIST_KEYS_LIGHT: startKeyName = getPath(threadSeqId * batchSize); getMetrics().timer(operation.name()).time(() -> { - List keyInfoList = - ozoneManagerClient.listKeys(volumeName, bucketName, startKeyName, - "", batchSize).getKeys(); + List keyInfoList = + ozoneManagerClient.listKeysLight(volumeName, bucketName, startKeyName, "", batchSize).getKeys(); if (keyInfoList.size() + 1 < batchSize) { - throw new NoSuchFileException( - "There are not enough files for testing you should use " - + "CREATE_FILE to create at least batch-size * threads = " - + batchSize * getThreadNo()); + throw new NoSuchFileException("There are not enough keys for testing you should use " + + "CREATE_KEY to create at least batch-size * threads = " + batchSize * getThreadNo()); } return null; }); break; case LIST_STATUS: - threadSeqId = getThreadSequenceId(); startKeyName = getPath(threadSeqId * batchSize); keyArgs = omKeyArgsBuilder.get().setKeyName("").build(); getMetrics().timer(operation.name()).time(() -> { List fileStatusList = ozoneManagerClient.listStatus( keyArgs, false, startKeyName, batchSize); if (fileStatusList.size() + 1 < batchSize) { - throw new NoSuchFileException( - "There are not enough files for testing you should use " - + "CREATE_FILE to create at least batch-size * threads = " - + batchSize * getThreadNo()); + throw new NoSuchFileException("There are not enough files for testing you should use " + + "CREATE_FILE to create at least batch-size * threads = " + batchSize * getThreadNo()); } return null; }); break; - case INFO_BUCKET: + case LIST_STATUS_LIGHT: + startKeyName = 
getPath(threadSeqId * batchSize); + keyArgs = omKeyArgsBuilder.get().setKeyName("").build(); getMetrics().timer(operation.name()).time(() -> { - try { - ozoneManagerClient.getBucketInfo(volumeName, bucketName); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + List fileStatusList = ozoneManagerClient.listStatusLight( + keyArgs, false, startKeyName, batchSize, false); + if (fileStatusList.size() + 1 < batchSize) { + throw new NoSuchFileException("There are not enough files for testing you should use " + + "CREATE_FILE to create at least batch-size * threads = " + batchSize * getThreadNo()); + } + return null; + }); + break; + case INFO_BUCKET: + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.getBucketInfo(volumeName, bucketName) ); break; case INFO_VOLUME: - getMetrics().timer(operation.name()).time(() -> { - try { - ozoneManagerClient.getVolumeInfo(volumeName); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - ); + getMetrics().timer(operation.name()).time(() -> ozoneManagerClient.getVolumeInfo(volumeName)); break; default: throw new IllegalStateException("Unrecognized write command " + @@ -455,4 +435,30 @@ private void applyOperation(long counter) throws Exception { } } + @FunctionalInterface + interface WriteOperation { + OutputStream createStream() throws IOException; + } + + @FunctionalInterface + interface ReadOperation { + InputStream createStream() throws IOException; + } + + private Void performWriteOperation(WriteOperation writeOp, ContentGenerator contentGen) throws IOException { + try (OutputStream stream = writeOp.createStream()) { + contentGen.write(stream); + } + return null; + } + + @SuppressWarnings("checkstyle:EmptyBlock") + private Void performReadOperation(ReadOperation readOp, byte[] buffer) throws IOException { + try (InputStream stream = readOp.createStream()) { + while (stream.read(buffer) >= 0) { + } + return null; + } + } + } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index 454660e2ca3..80e26e04451 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -50,7 +50,8 @@ SetAclBucketHandler.class, ClearQuotaHandler.class, SetReplicationConfigHandler.class, - UpdateBucketHandler.class + UpdateBucketHandler.class, + SetEncryptionKey.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java new file mode 100644 index 00000000000..86a50e9df3c --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell.bucket; + +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import picocli.CommandLine; + +import java.io.IOException; + +/** + * Command-line tool to set the encryption key of a bucket. + * + * There are known bugs, HDDS-7449 and HDDS-7526, which could potentially result + * in the loss of bucket encryption properties when either quota or bucket + * replication properties are (re)set on an existing bucket, posing a critical + * issue. This may affect consumers using previous versions of Ozone. + * + * To address this bug, this CLI tool provides the ability to (re)set the + * Bucket Encryption Key (BEK) for HDDS-7449/HDDS-7526 affected buckets using + * the Ozone shell. + * + * Users can execute the following command for setting BEK for a given bucket: + * "ozone sh bucket set-encryption-key -k /" + * + * Please note that this operation only resets the BEK and does not modify any + * other properties of the bucket or the existing keys within it. + * + * Existing keys in the bucket will retain their current properties, and any + * keys added before the BEK reset will remain unencrypted. Keys added after the + * BEK reset will be encrypted using the new BEK details provided. + * + * @deprecated This functionality is deprecated as it is not intended for users + * to reset bucket encryption post-bucket creation under normal circumstances + * and may be removed in the future. Users are advised to exercise caution and + * consider alternative approaches for managing bucket encryption unless + * HDDS-7449 or HDDS-7526 is encountered. As a result, the setter methods and + * this CLI functionality have been marked as deprecated, and the command has + * been hidden. + */ +@Deprecated +@CommandLine.Command(name = "set-encryption-key", + description = "Set Bucket Encryption Key (BEK) for a given bucket. 
Users " + + "are advised to exercise caution and consider alternative approaches " + + "for managing bucket encryption unless HDDS-7449 or HDDS-7526 is " + + "encountered.", + hidden = true) +public class SetEncryptionKey extends BucketHandler { + + @CommandLine.Option(names = {"--key", "-k"}, + description = "bucket encryption key name") + private String bekName; + + @Override + protected void execute(OzoneClient client, OzoneAddress address) + throws IOException, OzoneClientException { + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + OzoneBucket bucket = + client.getObjectStore().getVolume(volumeName).getBucket(bucketName); + bucket.setEncryptionKey(bekName); + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java index 99406fec0fb..47f465383d0 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java @@ -25,8 +25,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.api.Assertions.assertTrue; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -44,7 +43,10 @@ import java.util.Arrays; import java.util.List; +import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests AuditParser. 
@@ -128,8 +130,7 @@ public List handleExecutionException(ExecutionException ex, exceptionHandler, args); try { String output = OUT.toString(DEFAULT_CODING); - assertTrue(output.contains(msg), - "Output:\n" + output + "\nshould contain:\n" + msg); + assertThat(output).contains(msg); } catch (UnsupportedEncodingException ignored) { } } @@ -188,13 +189,10 @@ public void testQueryCommand() { @Test public void testLoadCommand() { String[] args1 = new String[]{dbName, "load", LOGS1}; - try { - execute(args1, ""); - fail("No exception thrown."); - } catch (Exception e) { - assertTrue(e.getCause() instanceof ArrayIndexOutOfBoundsException); - assertTrue(e.getMessage().contains(": 5")); - } + Exception e = + assertThrows(Exception.class, () -> execute(args1, "")); + assertInstanceOf(ArrayIndexOutOfBoundsException.class, e.getCause()); + assertThat(e.getMessage()).contains(": 5"); } /** diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java index f53ecca9125..8e291056330 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.checknative; import org.apache.hadoop.ozone.shell.checknative.CheckNative; -import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; @@ -28,6 +27,7 @@ import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; /** @@ -56,9 +56,9 @@ public void testCheckNativeNotLoaded() throws UnsupportedEncodingException { // trims multiple spaces String stdOut = outputStream.toString(DEFAULT_ENCODING) .replaceAll(" +", " "); - assertTrue(stdOut.contains("Native library checking:")); - assertTrue(stdOut.contains("hadoop: false")); - assertTrue(stdOut.contains("ISA-L: false")); + assertThat(stdOut).contains("Native library checking:"); + assertThat(stdOut).contains("hadoop: false"); + assertThat(stdOut).contains("ISA-L: false"); } @AfterEach diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java index eae13308a35..5e259012934 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java @@ -32,7 +32,7 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_KEY_DB; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import org.junit.jupiter.api.Test; @@ -45,37 +45,37 @@ public class TestDBDefinitionFactory { public void testGetDefinition() { DBDefinition definition = DBDefinitionFactory.getDefinition(new OMDBDefinition().getName()); - assertTrue(definition instanceof OMDBDefinition); + assertInstanceOf(OMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( new SCMDBDefinition().getName()); - assertTrue(definition instanceof SCMDBDefinition); + 
assertInstanceOf(SCMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( new ReconSCMDBDefinition().getName()); - assertTrue(definition instanceof ReconSCMDBDefinition); + assertInstanceOf(ReconSCMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( RECON_OM_SNAPSHOT_DB + "_1"); - assertTrue(definition instanceof OMDBDefinition); + assertInstanceOf(OMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( RECON_CONTAINER_KEY_DB + "_1"); - assertTrue(definition instanceof ReconDBDefinition); + assertInstanceOf(ReconDBDefinition.class, definition); DBDefinitionFactory.setDnDBSchemaVersion("V2"); definition = DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), new OzoneConfiguration()); - assertTrue(definition instanceof DatanodeSchemaTwoDBDefinition); + assertInstanceOf(DatanodeSchemaTwoDBDefinition.class, definition); DBDefinitionFactory.setDnDBSchemaVersion("V1"); definition = DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), new OzoneConfiguration()); - assertTrue(definition instanceof DatanodeSchemaOneDBDefinition); + assertInstanceOf(DatanodeSchemaOneDBDefinition.class, definition); DBDefinitionFactory.setDnDBSchemaVersion("V3"); definition = DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), new OzoneConfiguration()); - assertTrue(definition instanceof DatanodeSchemaThreeDBDefinition); + assertInstanceOf(DatanodeSchemaThreeDBDefinition.class, definition); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java index 7792e03e116..35a1ba20bb5 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java @@ -16,12 +16,13 @@ */ package org.apache.hadoop.ozone.freon.containergenerator; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.HashSet; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test datanode container generation placement. */ @@ -64,7 +65,7 @@ public void compare( int maxDatanodes, int overlap, Integer... 
expectations) { - Assertions.assertEquals( + assertEquals( new HashSet(Arrays.asList(expectations)), GeneratorDatanode.getPlacement(containerId, maxDatanodes, overlap)); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 4b0540d82f7..5b580c81c0e 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -19,10 +19,9 @@ package org.apache.hadoop.ozone.genconf; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.ozone.test.GenericTestUtils; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertNotEquals; import org.junit.jupiter.api.BeforeAll; @@ -30,6 +29,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -124,7 +124,7 @@ public List handleExecutionException(ExecutionException ex, }; cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); - assertTrue(out.toString(DEFAULT_ENCODING).contains(msg)); + assertThat(out.toString(DEFAULT_ENCODING)).contains(msg); } private void executeWithException(String[] args, String msg) { @@ -150,8 +150,7 @@ public List handleExecutionException(ExecutionException ex, cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); } catch (Exception ex) { - assertTrue(ex.getMessage().contains(msg), - "Expected " + msg + ", but got: " + ex.getMessage()); + assertThat(ex.getMessage()).contains(msg); } } @@ -163,8 +162,7 @@ public List handleExecutionException(ExecutionException ex, * @throws Exception */ @Test - public void testGenerateConfigurations() throws Exception { - File tempPath = getRandomTempDir(); + public void testGenerateConfigurations(@TempDir File tempPath) throws Exception { String[] args = new String[]{tempPath.getAbsolutePath()}; execute(args, "ozone-site.xml has been generated at " + tempPath.getAbsolutePath()); @@ -178,7 +176,7 @@ public void testGenerateConfigurations() throws Exception { //Asserts all properties have a non-empty value for (OzoneConfiguration.Property p : allProperties) { - assertTrue(p.getValue() != null && p.getValue().length() > 0); + assertThat(p.getValue()).isNotNull().isNotEmpty(); } } @@ -190,39 +188,38 @@ public void testGenerateConfigurations() throws Exception { * @throws Exception */ @Test - public void testGenerateSecurityConfigurations() throws Exception { + public void testGenerateSecurityConfigurations(@TempDir File tempPathDefault, @TempDir File tempPathSecure) + throws Exception { int ozoneConfigurationCount, ozoneSecurityConfigurationCount; // Generate default Ozone Configuration - File tempPath = getRandomTempDir(); - String[] args = new String[]{tempPath.getAbsolutePath()}; + String[] args = new String[]{tempPathDefault.getAbsolutePath()}; execute(args, "ozone-site.xml has been generated at " + - tempPath.getAbsolutePath()); + tempPathDefault.getAbsolutePath()); - URL url = new 
File(tempPath.getAbsolutePath() + "/ozone-site.xml") + URL url = new File(tempPathDefault.getAbsolutePath() + "/ozone-site.xml") .toURI().toURL(); OzoneConfiguration oc = new OzoneConfiguration(); List allProperties = oc.readPropertyFromXml(url); for (OzoneConfiguration.Property p : allProperties) { - assertTrue(p.getValue() != null && p.getValue().length() > 0); + assertThat(p.getValue()).isNotNull().isNotEmpty(); } ozoneConfigurationCount = allProperties.size(); // Generate secure Ozone Configuration - tempPath = getRandomTempDir(); - args = new String[]{"--security", tempPath.getAbsolutePath()}; + args = new String[]{"--security", tempPathSecure.getAbsolutePath()}; execute(args, "ozone-site.xml has been generated at " + - tempPath.getAbsolutePath()); + tempPathSecure.getAbsolutePath()); - url = new File(tempPath.getAbsolutePath() + "/ozone-site.xml") + url = new File(tempPathSecure.getAbsolutePath() + "/ozone-site.xml") .toURI().toURL(); oc = new OzoneConfiguration(); allProperties = oc.readPropertyFromXml(url); for (OzoneConfiguration.Property p : allProperties) { - assertTrue(p.getValue() != null && p.getValue().length() > 0); + assertThat(p.getValue()).isNotNull().isNotEmpty(); } ozoneSecurityConfigurationCount = allProperties.size(); @@ -236,8 +233,7 @@ public void testGenerateSecurityConfigurations() throws Exception { * @throws Exception */ @Test - public void testDoesNotOverwrite() throws Exception { - File tempPath = getRandomTempDir(); + public void testDoesNotOverwrite(@TempDir File tempPath) throws Exception { String[] args = new String[]{tempPath.getAbsolutePath()}; execute(args, "ozone-site.xml has been generated at " + tempPath.getAbsolutePath()); @@ -253,8 +249,7 @@ public void testDoesNotOverwrite() throws Exception { * @throws Exception */ @Test - public void genconfFailureByInsufficientPermissions() throws Exception { - File tempPath = getRandomTempDir(); + public void genconfFailureByInsufficientPermissions(@TempDir File tempPath) throws Exception { tempPath.setReadOnly(); String[] args = new String[]{tempPath.getAbsolutePath()}; executeWithException(args, "Insufficient permission."); @@ -265,8 +260,7 @@ public void genconfFailureByInsufficientPermissions() throws Exception { * @throws Exception */ @Test - public void genconfFailureByInvalidPath() throws Exception { - File tempPath = getRandomTempDir(); + public void genconfFailureByInvalidPath(@TempDir File tempPath) throws Exception { String[] args = new String[]{"invalid-path"}; executeWithException(args, "Invalid directory path."); } @@ -276,8 +270,7 @@ public void genconfFailureByInvalidPath() throws Exception { * @throws Exception */ @Test - public void genconfPathNotSpecified() throws Exception { - File tempPath = getRandomTempDir(); + public void genconfPathNotSpecified(@TempDir File tempPath) throws Exception { String[] args = new String[]{}; executeWithException(args, "Missing required parameter: ''"); } @@ -287,16 +280,8 @@ public void genconfPathNotSpecified() throws Exception { * @throws Exception */ @Test - public void genconfHelp() throws Exception { - File tempPath = getRandomTempDir(); + public void genconfHelp(@TempDir File tempPath) throws Exception { String[] args = new String[]{"--help"}; execute(args, "Usage: ozone genconf [-hV] [--security] [--verbose]"); } - - private File getRandomTempDir() throws IOException { - File tempDir = new File(outputBaseDir, - RandomStringUtils.randomAlphanumeric(5)); - FileUtils.forceMkdir(tempDir); - return tempDir; - } } diff --git 
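Note on the temp-directory handling in the TestGenerateOzoneRequiredConfigurations changes above: the removed getRandomTempDir() helper is replaced by JUnit 5's @TempDir injection, which creates a fresh directory per test and cleans it up afterwards. A minimal sketch (file name and content are hypothetical):

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirExample {

  @Test
  void writesIntoManagedTempDir(@TempDir File tempPath) throws Exception {
    // JUnit injects tempPath; no manual mkdir or cleanup is needed.
    Path generated = new File(tempPath, "ozone-site.xml").toPath();
    Files.write(generated, "<configuration/>".getBytes(StandardCharsets.UTF_8));
    assertTrue(Files.exists(generated));
  }
}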
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java index 2cb3be1d0b4..d44aed70eb4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java @@ -25,14 +25,14 @@ import java.io.IOException; import java.util.UUID; - import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertTrue; -import org.mockito.Mockito; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import picocli.CommandLine; /** @@ -52,8 +52,8 @@ public void testDecommissionScmInputParams() throws Exception { new GenericTestUtils.SystemErrCapturer()) { String[] args = {"scm", "decommission"}; admin.execute(args); - assertTrue(capture.getOutput().contains( - "Usage: ozone admin scm decommission")); + assertThat(capture.getOutput()).contains( + "Usage: ozone admin scm decommission"); } // now give required String @@ -66,15 +66,14 @@ public void testDecommissionScmInputParams() throws Exception { .setSuccess(true) .build(); - Mockito.when(client.decommissionScm(any())) + when(client.decommissionScm(any())) .thenAnswer(invocation -> ( response)); try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils.SystemOutCapturer()) { cmd.execute(client); - assertTrue(capture.getOutput().contains( - scmId)); + assertThat(capture.getOutput()).contains(scmId); } } @@ -94,16 +93,13 @@ public void testDecommissionScmScmRemoveErrors() throws Exception { .setErrorMsg("Cannot remove current leader.") .build(); - Mockito.when(client.decommissionScm(any())) + when(client.decommissionScm(any())) .thenAnswer(invocation -> ( response)); - try (GenericTestUtils.SystemOutCapturer capture = - new GenericTestUtils.SystemOutCapturer()) { - cmd.execute(client); - fail(); - } catch (IOException ex) { - assertTrue(ex.getMessage().contains("remove current leader")); + try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils.SystemOutCapturer()) { + IOException ioe = assertThrows(IOException.class, () -> cmd.execute(client)); + assertThat(ioe.getMessage()).contains("remove current leader"); } } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java index e831bf5f671..da95fd879c4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java @@ -23,10 +23,15 @@ import org.apache.hadoop.ozone.client.OzoneClientException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test ozone URL parsing. 
@@ -60,7 +65,7 @@ public void checkRootUrlType(String prefix) throws OzoneClientException { public void checkVolumeUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1"); address.ensureVolumeAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); + assertEquals("vol1", address.getVolumeName()); } @ParameterizedTest @@ -68,13 +73,13 @@ public void checkVolumeUrlType(String prefix) throws OzoneClientException { public void checkBucketUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket"); address.ensureBucketAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); address = new OzoneAddress(prefix + "vol1/bucket/"); address.ensureBucketAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); } @ParameterizedTest @@ -82,22 +87,22 @@ public void checkBucketUrlType(String prefix) throws OzoneClientException { public void checkKeyUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/key"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key", address.getKeyName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key", address.getKeyName()); address = new OzoneAddress(prefix + "vol1/bucket/key/"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key/", address.getKeyName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key/", address.getKeyName()); address = new OzoneAddress(prefix + "vol1/bucket/key1/key3/key"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key1/key3/key", address.getKeyName()); - Assertions.assertFalse(address.isPrefix(), "this should not be a prefix"); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key1/key3/key", address.getKeyName()); + assertFalse(address.isPrefix(), "this should not be a prefix"); } @ParameterizedTest @@ -105,10 +110,10 @@ public void checkKeyUrlType(String prefix) throws OzoneClientException { public void checkPrefixUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/prefix"); address.ensurePrefixAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("prefix", address.getKeyName()); - Assertions.assertTrue(address.isPrefix(), "this should be a prefix"); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("prefix", address.getKeyName()); + assertTrue(address.isPrefix(), "this should be a prefix"); } @ParameterizedTest @@ -116,11 +121,10 @@ public void 
checkPrefixUrlType(String prefix) throws OzoneClientException {
   public void checkSnapshotUrlType(String prefix) throws OzoneClientException {
     address = new OzoneAddress(prefix + "vol1/bucket/.snapshot/snap1");
     address.ensureSnapshotAddress();
-    Assertions.assertEquals("vol1", address.getVolumeName());
-    Assertions.assertEquals("bucket", address.getBucketName());
-    Assertions.assertEquals(".snapshot/snap1",
-        address.getSnapshotNameWithIndicator());
-    Assertions.assertEquals(".snapshot/snap1", address.getKeyName());
+    assertEquals("vol1", address.getVolumeName());
+    assertEquals("bucket", address.getBucketName());
+    assertEquals(".snapshot/snap1", address.getSnapshotNameWithIndicator());
+    assertEquals(".snapshot/snap1", address.getKeyName());
     String message = "Only a snapshot name with " +
@@ -128,9 +132,8 @@ public void checkSnapshotUrlType(String prefix) throws OzoneClientException {
     address = new OzoneAddress(prefix + "vol1/bucket/.snapshot");
-    OzoneClientException exception = Assertions
-        .assertThrows(OzoneClientException.class,
+    OzoneClientException exception = assertThrows(OzoneClientException.class,
         () -> address.ensureSnapshotAddress());
-    Assertions.assertTrue(exception.getMessage().contains(message));
+    assertThat(exception.getMessage()).contains(message);
   }
 }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
index 02bc2cade06..2457a00fe52 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
@@ -27,7 +27,10 @@
 import org.apache.hadoop.hdds.conf.InMemoryConfiguration;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-import org.junit.jupiter.api.Assertions;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import org.junit.jupiter.api.Test;
@@ -41,7 +44,7 @@ public void implicitNonHA() throws OzoneClientException, IOException {
     TestableOzoneAddress address =
         new TestableOzoneAddress("/vol1/bucket1/key1");
     address.createClient(new InMemoryConfiguration());
-    Assertions.assertTrue(address.simpleCreation);
+    assertTrue(address.simpleCreation);
   }
   @Test
@@ -51,8 +54,8 @@ public void implicitHAOneServiceId()
         new TestableOzoneAddress("/vol1/bucket1/key1");
     address.createClient(
         new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1"));
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("service1", address.serviceId);
+    assertFalse(address.simpleCreation);
+    assertEquals("service1", address.serviceId);
   }
   @Test
@@ -60,7 +63,7 @@ public void implicitHaMultipleServiceId()
       throws OzoneClientException, IOException {
     TestableOzoneAddress address =
         new TestableOzoneAddress("/vol1/bucket1/key1");
-    Assertions.assertThrows(OzoneClientException.class, () ->
+    assertThrows(OzoneClientException.class, () ->
         address.createClient(new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY,
             "service1,service2")));
   }
@@ -73,8 +76,8 @@ public void explicitHaMultipleServiceId()
     address.createClient(
         new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY,
             "service1,service2"));
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("service1", address.serviceId);
+    assertFalse(address.simpleCreation);
+    assertEquals("service1", address.serviceId);
   }
   @Test
@@ -82,9 +85,9 @@ public void explicitNonHAHostPort() throws OzoneClientException, IOException {
     TestableOzoneAddress address =
         new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1");
     address.createClient(new InMemoryConfiguration());
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("om", address.host);
-    Assertions.assertEquals(9862, address.port);
+    assertFalse(address.simpleCreation);
+    assertEquals("om", address.host);
+    assertEquals(9862, address.port);
   }
   @Test
@@ -94,9 +97,9 @@ public void explicitHAHostPortWithServiceId()
         new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1");
     address.createClient(
         new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1"));
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("om", address.host);
-    Assertions.assertEquals(9862, address.port);
+    assertFalse(address.simpleCreation);
+    assertEquals("om", address.host);
+    assertEquals(9862, address.port);
   }
   @Test
@@ -107,9 +110,9 @@ public void explicitAHostPortWithServiceIds()
     address.createClient(
         new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY,
             "service1,service2"));
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("om", address.host);
-    Assertions.assertEquals(9862, address.port);
+    assertFalse(address.simpleCreation);
+    assertEquals("om", address.host);
+    assertEquals(9862, address.port);
  }
   @Test
@@ -118,8 +121,8 @@ public void explicitNonHAHost() throws OzoneClientException, IOException {
         new TestableOzoneAddress("o3://om/vol1/bucket1/key1");
     address.createClient(
         new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1"));
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("om", address.host);
+    assertFalse(address.simpleCreation);
+    assertEquals("om", address.host);
   }
   @Test
@@ -127,16 +130,16 @@ public void explicitHAHostPort() throws OzoneClientException, IOException {
     TestableOzoneAddress address =
         new TestableOzoneAddress("o3://om:1234/vol1/bucket1/key1");
     address.createClient(new InMemoryConfiguration());
-    Assertions.assertFalse(address.simpleCreation);
-    Assertions.assertEquals("om", address.host);
-    Assertions.assertEquals(1234, address.port);
+    assertFalse(address.simpleCreation);
+    assertEquals("om", address.host);
+    assertEquals(1234, address.port);
   }
   @Test
   public void explicitWrongScheme() throws OzoneClientException, IOException {
     TestableOzoneAddress address =
         new TestableOzoneAddress("ssh://host/vol1/bucket1/key1");
-    Assertions.assertThrows(OzoneClientException.class, () ->
+    assertThrows(OzoneClientException.class, () ->
         address.createClient(new InMemoryConfiguration()));
   }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java
index c55cb9f55ac..d4fa929614f 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java
@@ -32,10 +32,8 @@
 import org.apache.hadoop.ozone.client.checksum.CrcUtil;
 import org.apache.hadoop.util.DataChecksum;
 import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -43,8 +41,10 @@
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.StandardCharsets;
-import static org.mockito.ArgumentMatchers.anyString;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.anyString;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 /**
  * Tests for ChecksumKeyHandler.
@@ -91,32 +91,30 @@ public void testChecksumKeyHandler()
     ObjectStore objectStore = mock(ObjectStore.class);
     OzoneClient client = mock(OzoneClient.class);
-    Mockito.when(client.getObjectStore()).thenReturn(objectStore);
+    when(client.getObjectStore()).thenReturn(objectStore);
     OzoneVolume volume = mock(OzoneVolume.class);
     OzoneBucket bucket = mock(OzoneBucket.class);
     OzoneKeyDetails key = mock(OzoneKeyDetails.class);
-    Mockito.when(volume.getBucket(anyString())).thenReturn(bucket);
-    Mockito.when(bucket.getKey(anyString()))
-        .thenReturn(key);
-    Mockito.when(objectStore.getVolume(anyString())).
-        thenReturn(volume);
-    Mockito.when(key.getDataSize()).thenReturn(keySize);
+    when(volume.getBucket(anyString())).thenReturn(bucket);
+    when(bucket.getKey(anyString())).thenReturn(key);
+    when(objectStore.getVolume(anyString())).thenReturn(volume);
+    when(key.getDataSize()).thenReturn(keySize);
     cmd.execute(client, address);
     ObjectMapper mapper = new ObjectMapper();
     JsonNode json = mapper.readTree(outContent.toString("UTF-8"));
-    Assertions.assertEquals("volume", json.get("volumeName").asText());
-    Assertions.assertEquals("bucket", json.get("bucketName").asText());
-    Assertions.assertEquals("key", json.get("name").asText());
-    Assertions.assertEquals(keySize, json.get("dataSize").asLong());
-    Assertions.assertEquals("COMPOSITE-CRC32", json.get("algorithm").asText());
+    assertEquals("volume", json.get("volumeName").asText());
+    assertEquals("bucket", json.get("bucketName").asText());
+    assertEquals("key", json.get("name").asText());
+    assertEquals(keySize, json.get("dataSize").asLong());
+    assertEquals("COMPOSITE-CRC32", json.get("algorithm").asText());
     String expectedChecksum = javax.xml.bind.DatatypeConverter.printHexBinary(
         CrcUtil.intToBytes(Integer.valueOf(CHECKSUM)));
-    Assertions.assertEquals(expectedChecksum, json.get("checksum").asText());
+    assertEquals(expectedChecksum, json.get("checksum").asText());
   }
 }
diff --git a/pom.xml b/pom.xml
index 75a878b5b4c..37dfb139e2c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -124,7 +124,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.14.0
     1.2
     1.1
-    3.1.1
+    3.6.1
     3.10.0
     2.6.0
     1.11.0
@@ -141,7 +141,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.12.2
     5.0.4
     0.8.0.RELEASE
-    1.67
+    1.77
     3.3.0
     10.14.2.0
     3.0.2
@@ -169,10 +169,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     5.4.0
-    1.6.0
+    1.8.1
     0.33.0
-    2.5.0
+    2.6.1
     4.5.13
@@ -191,7 +191,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     0.9.11
-    1.1
+    1.7
@@ -205,7 +205,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     4.12.0
     4.2.2
     2.6.1
-    1.3.5
+    2.1.1
     2.12.5
     0.19
@@ -220,14 +220,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     4.11.0
     2.2
     1.24
-    4.13.1
     5.10.1
     3.7.2
     0.5.1
     3.19.6
-    1.5.0.Final
+    1.7.1
@@ -248,13 +247,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     -->
     [${javac.version},)
     [3.3.0,)
-    true
     -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError
     flaky | slow | unhealthy
-    3.2.2
+    3.0.0-M5
     ${maven-surefire-plugin.version}
     ${maven-surefire-plugin.version}
@@ -263,7 +261,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.1.1
     3.1.0
     3.5.1
-    2.5
+    3.3.0
     3.4.0
     3.3.0
     1.6.1
@@ -271,22 +269,23 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.4.1
     3.0.1
     3.6.0
-    0.12
+    0.16.1
     2.8.1
     1.9
     3.6.1
+    4.2.2
     0.29.0
     1.3.1
     2.3.0
     1.0-beta-1
     1.0-alpha-11
-    3.1.2
+    3.3.1
     3.9.1
     3.1.0
     9.3
     1200
     1.12.632
-    1.12.0
+    1.15.0
     ${hadoop.version}
@@ -1156,17 +1155,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     aws-java-sdk-s3
     ${aws-java-sdk.version}
-
-    junit
-    junit
-    ${junit4.version}
-
-
-    org.hamcrest
-    hamcrest-core
-
-
-
     org.hamcrest
     hamcrest
@@ -1444,17 +1432,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.bouncycastle
-    bcprov-jdk15on
+    bcprov-jdk18on
     ${bouncycastle.version}
     org.bouncycastle
-    bcpkix-jdk15on
+    bcpkix-jdk18on
     ${bouncycastle.version}
     org.bouncycastle
-    bcprov-jdk16
+    bcutil-jdk18on
     ${bouncycastle.version}
@@ -1611,18 +1599,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.codehaus.mojo
     license-maven-plugin
     ${license-maven-plugin.version}
-
-
-    org.apache.maven.doxia
-    doxia-core
-    1.6
-
-
-    org.apache.maven.doxia
-    doxia-site-renderer
-    1.6
-
-
     false
     ${project.basedir}
@@ -1744,6 +1720,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.apache.maven.plugins
     maven-dependency-plugin
     ${maven-dependency-plugin.version}
+
+
+    org.codehaus.plexus
+    plexus-archiver
+    ${plexus-archiver.version}
+
+
     org.apache.maven.plugins
@@ -1806,6 +1789,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.apache.hadoop.hdds.utils.db.managed.*
+
+
+
+
+    ban-imports
+    process-sources
+
+    enforce
+
+
+
     true
     Use directly from Guava
@@ -1818,22 +1812,35 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     true
     Disable with @Unhealthy or @Slow instead (see HDDS-9276)
-    org.junit.Ignore
     org.junit.jupiter.api.Disabled
-
-
-
-
-    ban-junit4-imports
-    process-sources
-
-    enforce
-
-
-    ${allow.junit4}
-
+
+    true
+    Use Ozone's version of the same class
+
+    org.apache.hadoop.test.GenericTestUtils
+    org.apache.hadoop.test.LambdaTestUtils
+
+
+    org.apache.hadoop.fs.contract.*
+    org.apache.hadoop.tools.contract.*
+
+
+
+    true
+    Use Ozone's version of the same class
+
+    org.apache.hadoop.test.MetricsAssert
+
+
+
+    true
+    Use Ozone's similar class
+
+    org.apache.hadoop.hdfs.MiniDFSCluster
+
+
     true
     true
@@ -1843,8 +1850,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.junit.jupiter.**
+    org.junit.platform.**
+
+    true
+    Use jakarta.annotation
+
+    javax.annotation.Nonnull
+    javax.annotation.Nullable
+    org.checkerframework.checker.nullness.qual.NonNull
+    org.checkerframework.checker.nullness.qual.Nullable
+    org.jetbrains.annotations.NotNull
+    org.jetbrains.annotations.Nullable
+
+
@@ -1973,6 +1993,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     false
     ${surefire.fork.timeout}
+    all
     ${maven-surefire-plugin.argLine} ${maven-surefire-plugin.argLineAccessArgs} @{argLine}
@@ -2256,23 +2277,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
-
-    scm
-
-
-
-    org.apache.maven.plugins
-    maven-surefire-plugin
-
-
-    org.apache.hadoop.ozone.scm.**
-
-    ${unstable-test-groups}
-
-
-
-
-
     contract
@@ -2343,7 +2347,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     org.apache.hadoop.ozone.debug.**
     org.apache.hadoop.ozone.freon.**
     org.apache.hadoop.ozone.om.**
-    org.apache.hadoop.ozone.scm.**
     org.apache.hadoop.ozone.shell.**
     ${unstable-test-groups}
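For reference, the assertion style the shell tests converge on above: JUnit 5 assertions are imported statically, and exception messages are checked with a fluent assertThat(...).contains(...). The fluent form matches AssertJ's org.assertj.core.api.Assertions.assertThat; its import is not visible in these hunks, so it is assumed in the minimal, hypothetical sketch below (test class and message are illustrative only, not part of the patch):

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import org.junit.jupiter.api.Test;

    class AssertionStyleExampleTest {

      @Test
      void checksValuesAndExceptionMessages() {
        // Plain value checks use the statically imported JUnit 5 assertion.
        assertEquals(4, 2 + 2);

        // Exception checks capture the exception with assertThrows and then
        // verify its message with the fluent contains() assertion.
        IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
            () -> {
              throw new IllegalArgumentException("missing snapshot name");
            });
        assertThat(e.getMessage()).contains("snapshot");
      }
    }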
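Likewise, TestChecksumKeyHandler now uses statically imported mock()/when()/anyString() instead of Mockito.-qualified calls; anyString() is imported from org.mockito.Mockito, which exposes the ArgumentMatchers methods. A self-contained sketch of that stubbing style, using a hypothetical LookupService interface:

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public final class MockitoStyleExample {

      /** Hypothetical collaborator, used only to show the stubbing style. */
      interface LookupService {
        String lookup(String key);
      }

      public static void main(String[] args) {
        // Create the mock and stub it with statically imported when()/anyString().
        LookupService service = mock(LookupService.class);
        when(service.lookup(anyString())).thenReturn("value");

        System.out.println(service.lookup("any-key")); // prints "value"
      }
    }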
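On the BouncyCastle change in pom.xml: the patch moves from the older *-jdk15on artifacts (and bcprov-jdk16) to bcprov-jdk18on, bcpkix-jdk18on and bcutil-jdk18on. As far as I know this is a Maven-coordinate change only, since the org.bouncycastle package names are unchanged in the jdk18on artifacts, so provider registration code like the sketch below should keep compiling as-is (illustrative, assuming bcprov-jdk18on is on the classpath):

    import java.security.Security;

    import org.bouncycastle.jce.provider.BouncyCastleProvider;

    public final class RegisterBouncyCastle {
      public static void main(String[] args) {
        // Same class and package as with bcprov-jdk15on; only the artifactId differs.
        Security.addProvider(new BouncyCastleProvider());
        System.out.println("Registered provider: " + BouncyCastleProvider.PROVIDER_NAME);
      }
    }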
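The new ban-imports enforcer rules also steer nullability annotations away from javax.annotation, Checker Framework and JetBrains ("Use jakarta.annotation"). A minimal sketch of the target style, assuming a jakarta.annotation-api version that provides Nonnull/Nullable (2.1 or later) is on the classpath; the Greeter class itself is hypothetical:

    import jakarta.annotation.Nonnull;
    import jakarta.annotation.Nullable;

    public final class Greeter {

      /** Never returns null; accepts a possibly-null name. */
      @Nonnull
      public String greet(@Nullable String name) {
        return "Hello, " + (name == null ? "world" : name) + "!";
      }
    }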